Dataset schema:

column            dtype                    values / lengths
----------------  -----------------------  ----------------
repo_name         stringclasses            6 values
pr_number         int64                    512 .. 78.9k
pr_title          stringlengths            3 .. 144
pr_description    stringlengths            0 .. 30.3k
author            stringlengths            2 .. 21
date_created      timestamp[ns, tz=UTC]
date_merged       timestamp[ns, tz=UTC]
previous_commit   stringlengths            40 .. 40
pr_commit         stringlengths            40 .. 40
query             stringlengths            17 .. 30.4k
filepath          stringlengths            9 .. 210
before_content    stringlengths            0 .. 112M
after_content     stringlengths            0 .. 112M
label             int64                    -1 .. 1
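To make the row shape concrete, here is a minimal C# sketch of a record mirroring these columns. The type name and the choice of DateTimeOffset for the timestamp columns are illustrative assumptions, not something the dataset itself defines.

using System;

// Hypothetical row type for the schema above; names mirror the columns,
// types mirror the declared dtypes (stringclasses/stringlengths -> string,
// int64 -> long, timestamp[ns, tz=UTC] -> DateTimeOffset).
public sealed record PrFileRow(
    string RepoName,
    long PrNumber,
    string PrTitle,
    string PrDescription,
    string Author,
    DateTimeOffset DateCreated,
    DateTimeOffset DateMerged,
    string PreviousCommit,   // 40-char commit SHA
    string PrCommit,         // 40-char commit SHA
    string Query,
    string Filepath,
    string BeforeContent,
    string AfterContent,
    long Label);             // observed range: -1 .. 1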
repo_name: dotnet/runtime
pr_number: 66372
pr_title: Add Stopwatch.GetElapsedTime
pr_description: Fixes https://github.com/dotnet/runtime/issues/65858
author: stephentoub
date_created: 2022-03-09T01:52:28Z
date_merged: 2022-03-09T12:42:15Z
previous_commit: ca731545a58307870a0baebb0ee43eeea61f175f
pr_commit: c9f7f7389e8e9a00d501aef696333b67d218baac
query: Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
filepath: ./src/tests/JIT/HardwareIntrinsics/X86/Avx2/AlignRight.SByte.27.cs
before_content and after_content (identical for this row):
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void AlignRightSByte27() { var test = new ImmBinaryOpTest__AlignRightSByte27(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__AlignRightSByte27 { private struct TestStruct { public Vector256<SByte> _fld1; public Vector256<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__AlignRightSByte27 testClass) { var result = Avx2.AlignRight(_fld1, _fld2, 27); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int 
Op1ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector256<SByte> _clsVar1; private static Vector256<SByte> _clsVar2; private Vector256<SByte> _fld1; private Vector256<SByte> _fld2; private SimpleBinaryOpTest__DataTable<SByte, SByte, SByte> _dataTable; static ImmBinaryOpTest__AlignRightSByte27() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); } public ImmBinaryOpTest__AlignRightSByte27() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new SimpleBinaryOpTest__DataTable<SByte, SByte, SByte>(_data1, _data2, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.AlignRight( Unsafe.Read<Vector256<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<SByte>>(_dataTable.inArray2Ptr), 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.AlignRight( Avx.LoadVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((SByte*)(_dataTable.inArray2Ptr)), 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.AlignRight( Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray2Ptr)), 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.AlignRight), new Type[] { 
typeof(Vector256<SByte>), typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<SByte>>(_dataTable.inArray2Ptr), (byte)27 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.AlignRight), new Type[] { typeof(Vector256<SByte>), typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((SByte*)(_dataTable.inArray2Ptr)), (byte)27 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.AlignRight), new Type[] { typeof(Vector256<SByte>), typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray2Ptr)), (byte)27 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.AlignRight( _clsVar1, _clsVar2, 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var left = Unsafe.Read<Vector256<SByte>>(_dataTable.inArray1Ptr); var right = Unsafe.Read<Vector256<SByte>>(_dataTable.inArray2Ptr); var result = Avx2.AlignRight(left, right, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var left = Avx.LoadVector256((SByte*)(_dataTable.inArray1Ptr)); var right = Avx.LoadVector256((SByte*)(_dataTable.inArray2Ptr)); var result = Avx2.AlignRight(left, right, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var left = Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray1Ptr)); var right = Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray2Ptr)); var result = Avx2.AlignRight(left, right, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__AlignRightSByte27(); var result = Avx2.AlignRight(test._fld1, test._fld2, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.AlignRight(_fld1, _fld2, 27); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.AlignRight(test._fld1, test._fld2, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<SByte> left, Vector256<SByte> right, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), left); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), right); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* left, void* right, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector256<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector256<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != left[11]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((result[i] != ((i < 16) ? ((i < 5) ? left[i + 11] : 0) : ((i < 21) ? left[i + 11] : 0)))) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.AlignRight)}<SByte>(Vector256<SByte>.27, Vector256<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void AlignRightSByte27() { var test = new ImmBinaryOpTest__AlignRightSByte27(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__AlignRightSByte27 { private struct TestStruct { public Vector256<SByte> _fld1; public Vector256<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__AlignRightSByte27 testClass) { var result = Avx2.AlignRight(_fld1, _fld2, 27); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int 
Op1ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector256<SByte> _clsVar1; private static Vector256<SByte> _clsVar2; private Vector256<SByte> _fld1; private Vector256<SByte> _fld2; private SimpleBinaryOpTest__DataTable<SByte, SByte, SByte> _dataTable; static ImmBinaryOpTest__AlignRightSByte27() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); } public ImmBinaryOpTest__AlignRightSByte27() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new SimpleBinaryOpTest__DataTable<SByte, SByte, SByte>(_data1, _data2, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.AlignRight( Unsafe.Read<Vector256<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<SByte>>(_dataTable.inArray2Ptr), 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.AlignRight( Avx.LoadVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((SByte*)(_dataTable.inArray2Ptr)), 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.AlignRight( Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray2Ptr)), 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.AlignRight), new Type[] { 
typeof(Vector256<SByte>), typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<SByte>>(_dataTable.inArray2Ptr), (byte)27 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.AlignRight), new Type[] { typeof(Vector256<SByte>), typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((SByte*)(_dataTable.inArray2Ptr)), (byte)27 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.AlignRight), new Type[] { typeof(Vector256<SByte>), typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray2Ptr)), (byte)27 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.AlignRight( _clsVar1, _clsVar2, 27 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var left = Unsafe.Read<Vector256<SByte>>(_dataTable.inArray1Ptr); var right = Unsafe.Read<Vector256<SByte>>(_dataTable.inArray2Ptr); var result = Avx2.AlignRight(left, right, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var left = Avx.LoadVector256((SByte*)(_dataTable.inArray1Ptr)); var right = Avx.LoadVector256((SByte*)(_dataTable.inArray2Ptr)); var result = Avx2.AlignRight(left, right, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var left = Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray1Ptr)); var right = Avx.LoadAlignedVector256((SByte*)(_dataTable.inArray2Ptr)); var result = Avx2.AlignRight(left, right, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(left, right, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__AlignRightSByte27(); var result = Avx2.AlignRight(test._fld1, test._fld2, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.AlignRight(_fld1, _fld2, 27); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.AlignRight(test._fld1, test._fld2, 27); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<SByte> left, Vector256<SByte> right, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), left); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), right); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* left, void* right, void* result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector256<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector256<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != left[11]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((result[i] != ((i < 16) ? ((i < 5) ? left[i + 11] : 0) : ((i < 21) ? left[i + 11] : 0)))) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.AlignRight)}<SByte>(Vector256<SByte>.27, Vector256<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
label: -1
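The before/after payload in this row is an auto-generated hardware-intrinsics test exercising Avx2.AlignRight with the byte immediate 27; the file is identical before and after the PR, hence label -1. As a minimal sketch of what that intrinsic computes, assuming an AVX2-capable machine (the class name and input values here are illustrative):

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class AlignRightSketch
{
    static void Main()
    {
        if (!Avx2.IsSupported)
        {
            Console.WriteLine("AVX2 is not supported on this machine.");
            return;
        }

        Vector256<sbyte> left  = Vector256.Create((sbyte)1);
        Vector256<sbyte> right = Vector256.Create((sbyte)2);

        // VPALIGNR concatenates right:left within each 128-bit lane and
        // shifts right by the byte immediate. With an immediate of 27
        // (> 16), only bytes from `left` survive: within each lane,
        // result[i] == left[i + 11] for lane offsets i < 5 and 0 for the
        // rest -- exactly the pattern the test's ValidateResult checks.
        Vector256<sbyte> result = Avx2.AlignRight(left, right, 27);
        Console.WriteLine(result);
    }
}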
repo_name: dotnet/runtime
pr_number: 66372
pr_title: Add Stopwatch.GetElapsedTime
pr_description: Fixes https://github.com/dotnet/runtime/issues/65858
author: stephentoub
date_created: 2022-03-09T01:52:28Z
date_merged: 2022-03-09T12:42:15Z
previous_commit: ca731545a58307870a0baebb0ee43eeea61f175f
pr_commit: c9f7f7389e8e9a00d501aef696333b67d218baac
query: Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
filepath: ./src/tests/JIT/Methodical/Boxing/xlang/sin_il_cs_il_d.ilproj
before_content:

<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="sin_cs.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="sinlib_cs.csproj" />
  </ItemGroup>
</Project>
after_content (identical; the PR left this file unchanged):

<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="sin_cs.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="sinlib_cs.csproj" />
  </ItemGroup>
</Project>
label: -1
repo_name: dotnet/runtime
pr_number: 66372
pr_title: Add Stopwatch.GetElapsedTime
pr_description: Fixes https://github.com/dotnet/runtime/issues/65858
author: stephentoub
date_created: 2022-03-09T01:52:28Z
date_merged: 2022-03-09T12:42:15Z
previous_commit: ca731545a58307870a0baebb0ee43eeea61f175f
pr_commit: c9f7f7389e8e9a00d501aef696333b67d218baac
query: Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
filepath: ./src/libraries/System.Private.CoreLib/src/System/Collections/Concurrent/ConcurrentQueue.cs
before_content:
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Numerics; using System.Threading; namespace System.Collections.Concurrent { /// <summary> /// Represents a thread-safe first-in, first-out collection of objects. /// </summary> /// <typeparam name="T">Specifies the type of elements in the queue.</typeparam> /// <remarks> /// All public and protected members of <see cref="ConcurrentQueue{T}"/> are thread-safe and may be used /// concurrently from multiple threads. /// </remarks> [DebuggerDisplay("Count = {Count}")] [DebuggerTypeProxy(typeof(IProducerConsumerCollectionDebugView<>))] public class ConcurrentQueue<T> : IProducerConsumerCollection<T>, IReadOnlyCollection<T> { // This implementation provides an unbounded, multi-producer multi-consumer queue // that supports the standard Enqueue/TryDequeue operations, as well as support for // snapshot enumeration (GetEnumerator, ToArray, CopyTo), peeking, and Count/IsEmpty. // It is composed of a linked list of bounded ring buffers, each of which has a head // and a tail index, isolated from each other to minimize false sharing. As long as // the number of elements in the queue remains less than the size of the current // buffer (Segment), no additional allocations are required for enqueued items. When // the number of items exceeds the size of the current segment, the current segment is // "frozen" to prevent further enqueues, and a new segment is linked from it and set // as the new tail segment for subsequent enqueues. As old segments are consumed by // dequeues, the head reference is updated to point to the segment that dequeuers should // try next. To support snapshot enumeration, segments also support the notion of // preserving for observation, whereby they avoid overwriting state as part of dequeues. // Any operation that requires a snapshot results in all current segments being // both frozen for enqueues and preserved for observation: any new enqueues will go // to new segments, and dequeuers will consume from the existing segments but without // overwriting the existing data. /// <summary>Initial length of the segments used in the queue.</summary> private const int InitialSegmentLength = 32; /// <summary> /// Maximum length of the segments used in the queue. This is a somewhat arbitrary limit: /// larger means that as long as we don't exceed the size, we avoid allocating more segments, /// but if we do exceed it, then the segment becomes garbage. /// </summary> private const int MaxSegmentLength = 1024 * 1024; /// <summary> /// Lock used to protect cross-segment operations, including any updates to <see cref="_tail"/> or <see cref="_head"/> /// and any operations that need to get a consistent view of them. /// </summary> private readonly object _crossSegmentLock; /// <summary>The current tail segment.</summary> private volatile ConcurrentQueueSegment<T> _tail; /// <summary>The current head segment.</summary> private volatile ConcurrentQueueSegment<T> _head; // SOS's ThreadPool command depends on this name /// <summary> /// Initializes a new instance of the <see cref="ConcurrentQueue{T}"/> class. 
/// </summary> public ConcurrentQueue() { _crossSegmentLock = new object(); _tail = _head = new ConcurrentQueueSegment<T>(InitialSegmentLength); } /// <summary> /// Initializes a new instance of the <see cref="ConcurrentQueue{T}"/> class that contains elements copied /// from the specified collection. /// </summary> /// <param name="collection"> /// The collection whose elements are copied to the new <see cref="ConcurrentQueue{T}"/>. /// </param> /// <exception cref="System.ArgumentNullException">The <paramref name="collection"/> argument is null.</exception> public ConcurrentQueue(IEnumerable<T> collection) { if (collection == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.collection); } _crossSegmentLock = new object(); // Determine the initial segment size. We'll use the default, // unless the collection is known to be larger than that, in which // case we round its length up to a power of 2, as all segments must // be a power of 2 in length. int length = InitialSegmentLength; if (collection is ICollection<T> c) { int count = c.Count; if (count > length) { length = (int)Math.Min(BitOperations.RoundUpToPowerOf2((uint)count), MaxSegmentLength); } } // Initialize the segment and add all of the data to it. _tail = _head = new ConcurrentQueueSegment<T>(length); foreach (T item in collection) { Enqueue(item); } } /// <summary> /// Copies the elements of the <see cref="ICollection"/> to an <see /// cref="Array"/>, starting at a particular <see cref="Array"/> index. /// </summary> /// <param name="array"> /// The one-dimensional <see cref="Array">Array</see> that is the destination of the /// elements copied from the <see cref="ConcurrentQueue{T}"/>. <paramref name="array"/> must have /// zero-based indexing. /// </param> /// <param name="index">The zero-based index in <paramref name="array"/> at which copying begins.</param> /// <exception cref="ArgumentNullException"><paramref name="array"/> is a null reference (Nothing in /// Visual Basic).</exception> /// <exception cref="ArgumentOutOfRangeException"><paramref name="index"/> is less than /// zero.</exception> /// <exception cref="ArgumentException"> /// <paramref name="array"/> is multidimensional. -or- /// <paramref name="array"/> does not have zero-based indexing. -or- /// <paramref name="index"/> is equal to or greater than the length of the <paramref name="array"/> /// -or- The number of elements in the source <see cref="ICollection"/> is /// greater than the available space from <paramref name="index"/> to the end of the destination /// <paramref name="array"/>. -or- The type of the source <see /// cref="ICollection"/> cannot be cast automatically to the type of the /// destination <paramref name="array"/>. /// </exception> void ICollection.CopyTo(Array array, int index) { // Special-case when the Array is actually a T[], taking a faster path if (array is T[] szArray) { CopyTo(szArray, index); return; } // Validate arguments. if (array == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.array); } // Otherwise, fall back to the slower path that first copies the contents // to an array, and then uses that array's non-generic CopyTo to do the copy. ToArray().CopyTo(array, index); } /// <summary> /// Gets a value indicating whether access to the <see cref="ICollection"/> is /// synchronized with the SyncRoot. /// </summary> /// <value>true if access to the <see cref="ICollection"/> is synchronized /// with the SyncRoot; otherwise, false. 
For <see cref="ConcurrentQueue{T}"/>, this property always /// returns false.</value> bool ICollection.IsSynchronized => false; // always false, as true implies synchronization via SyncRoot /// <summary> /// Gets an object that can be used to synchronize access to the <see /// cref="ICollection"/>. This property is not supported. /// </summary> /// <exception cref="NotSupportedException">The SyncRoot property is not supported.</exception> object ICollection.SyncRoot { get { ThrowHelper.ThrowNotSupportedException(ExceptionResource.ConcurrentCollection_SyncRoot_NotSupported); return default; } } /// <summary>Returns an enumerator that iterates through a collection.</summary> /// <returns>An <see cref="IEnumerator"/> that can be used to iterate through the collection.</returns> IEnumerator IEnumerable.GetEnumerator() => ((IEnumerable<T>)this).GetEnumerator(); /// <summary> /// Attempts to add an object to the <see cref="Concurrent.IProducerConsumerCollection{T}"/>. /// </summary> /// <param name="item">The object to add to the <see /// cref="Concurrent.IProducerConsumerCollection{T}"/>. The value can be a null /// reference (Nothing in Visual Basic) for reference types. /// </param> /// <returns>true if the object was added successfully; otherwise, false.</returns> /// <remarks>For <see cref="ConcurrentQueue{T}"/>, this operation will always add the object to the /// end of the <see cref="ConcurrentQueue{T}"/> /// and return true.</remarks> bool IProducerConsumerCollection<T>.TryAdd(T item) { Enqueue(item); return true; } /// <summary> /// Attempts to remove and return an object from the <see cref="Concurrent.IProducerConsumerCollection{T}"/>. /// </summary> /// <param name="item"> /// When this method returns, if the operation was successful, <paramref name="item"/> contains the /// object removed. If no object was available to be removed, the value is unspecified. /// </param> /// <returns>true if an element was removed and returned successfully; otherwise, false.</returns> /// <remarks>For <see cref="ConcurrentQueue{T}"/>, this operation will attempt to remove the object /// from the beginning of the <see cref="ConcurrentQueue{T}"/>. /// </remarks> bool IProducerConsumerCollection<T>.TryTake([MaybeNullWhen(false)] out T item) => TryDequeue(out item); /// <summary> /// Gets a value that indicates whether the <see cref="ConcurrentQueue{T}"/> is empty. /// </summary> /// <value>true if the <see cref="ConcurrentQueue{T}"/> is empty; otherwise, false.</value> /// <remarks> /// For determining whether the collection contains any items, use of this property is recommended /// rather than retrieving the number of items from the <see cref="Count"/> property and comparing it /// to 0. However, as this collection is intended to be accessed concurrently, it may be the case /// that another thread will modify the collection after <see cref="IsEmpty"/> returns, thus invalidating /// the result. /// </remarks> public bool IsEmpty => // IsEmpty == !TryPeek. We use a "resultUsed:false" peek in order to avoid marking // segments as preserved for observation, making IsEmpty a cheaper way than either // TryPeek(out T) or Count == 0 to check whether any elements are in the queue. !TryPeek(out _, resultUsed: false); /// <summary>Copies the elements stored in the <see cref="ConcurrentQueue{T}"/> to a new array.</summary> /// <returns>A new array containing a snapshot of elements copied from the <see cref="ConcurrentQueue{T}"/>.</returns> public T[] ToArray() { // Snap the current contents for enumeration. 
SnapForObservation(out ConcurrentQueueSegment<T> head, out int headHead, out ConcurrentQueueSegment<T> tail, out int tailTail); // Count the number of items in that snapped set, and use it to allocate an // array of the right size. long count = GetCount(head, headHead, tail, tailTail); T[] arr = new T[count]; // Now enumerate the contents, copying each element into the array. using (IEnumerator<T> e = Enumerate(head, headHead, tail, tailTail)) { int i = 0; while (e.MoveNext()) { arr[i++] = e.Current; } Debug.Assert(count == i); } // And return it. return arr; } /// <summary> /// Gets the number of elements contained in the <see cref="ConcurrentQueue{T}"/>. /// </summary> /// <value>The number of elements contained in the <see cref="ConcurrentQueue{T}"/>.</value> /// <remarks> /// For determining whether the collection contains any items, use of the <see cref="IsEmpty"/> /// property is recommended rather than retrieving the number of items from the <see cref="Count"/> /// property and comparing it to 0. /// </remarks> public int Count { get { SpinWait spinner = default; while (true) { // Capture the head and tail, as well as the head's head and tail. ConcurrentQueueSegment<T> head = _head; ConcurrentQueueSegment<T> tail = _tail; int headHead = Volatile.Read(ref head._headAndTail.Head); int headTail = Volatile.Read(ref head._headAndTail.Tail); if (head == tail) { // There was a single segment in the queue. If the captured segments still // match, then we can trust the values to compute the segment's count. (It's // theoretically possible the values could have looped around and still exactly match, // but that would required at least ~4 billion elements to have been enqueued and // dequeued between the reads.) if (head == _head && tail == _tail && headHead == Volatile.Read(ref head._headAndTail.Head) && headTail == Volatile.Read(ref head._headAndTail.Tail)) { return GetCount(head, headHead, headTail); } } else if (head._nextSegment == tail) { // There were two segments in the queue. Get the positions from the tail, and as above, // if the captured values match the previous reads, return the sum of the counts from both segments. int tailHead = Volatile.Read(ref tail._headAndTail.Head); int tailTail = Volatile.Read(ref tail._headAndTail.Tail); if (head == _head && tail == _tail && headHead == Volatile.Read(ref head._headAndTail.Head) && headTail == Volatile.Read(ref head._headAndTail.Tail) && tailHead == Volatile.Read(ref tail._headAndTail.Head) && tailTail == Volatile.Read(ref tail._headAndTail.Tail)) { return GetCount(head, headHead, headTail) + GetCount(tail, tailHead, tailTail); } } else { // There were more than two segments in the queue. Fall back to taking the cross-segment lock, // which will ensure that the head and tail segments we read are stable (since the lock is needed to change them); // for the two-segment case above, we can simply rely on subsequent comparisons, but for the two+ case, we need // to be able to trust the internal segments between the head and tail. lock (_crossSegmentLock) { // Now that we hold the lock, re-read the previously captured head and tail segments and head positions. // If either has changed, start over. if (head == _head && tail == _tail) { // Get the positions from the tail, and as above, if the captured values match the previous reads, // we can use the values to compute the count of the head and tail segments. 
int tailHead = Volatile.Read(ref tail._headAndTail.Head); int tailTail = Volatile.Read(ref tail._headAndTail.Tail); if (headHead == Volatile.Read(ref head._headAndTail.Head) && headTail == Volatile.Read(ref head._headAndTail.Tail) && tailHead == Volatile.Read(ref tail._headAndTail.Head) && tailTail == Volatile.Read(ref tail._headAndTail.Tail)) { // We got stable values for the head and tail segments, so we can just compute the sizes // based on those and add them. Note that this and the below additions to count may overflow: previous // implementations allowed that, so we don't check, either, and it is theoretically possible for the // queue to store more than int.MaxValue items. int count = GetCount(head, headHead, headTail) + GetCount(tail, tailHead, tailTail); // Now add the counts for each internal segment. Since there were segments before these, // for counting purposes we consider them to start at the 0th element, and since there is at // least one segment after each, each was frozen, so we can count until each's frozen tail. // With the cross-segment lock held, we're guaranteed that all of these internal segments are // consistent, as the head and tail segment can't be changed while we're holding the lock, and // dequeueing and enqueueing can only be done from the head and tail segments, which these aren't. for (ConcurrentQueueSegment<T> s = head._nextSegment!; s != tail; s = s._nextSegment!) { Debug.Assert(s._frozenForEnqueues, "Internal segment must be frozen as there's a following segment."); count += s._headAndTail.Tail - s.FreezeOffset; } return count; } } } } // We raced with enqueues/dequeues and captured an inconsistent picture of the queue. // Spin and try again. spinner.SpinOnce(); } } } /// <summary>Computes the number of items in a segment based on a fixed head and tail in that segment.</summary> private static int GetCount(ConcurrentQueueSegment<T> s, int head, int tail) { if (head != tail && head != tail - s.FreezeOffset) { head &= s._slotsMask; tail &= s._slotsMask; return head < tail ? tail - head : s._slots.Length - head + tail; } return 0; } /// <summary>Gets the number of items in snapped region.</summary> private static long GetCount(ConcurrentQueueSegment<T> head, int headHead, ConcurrentQueueSegment<T> tail, int tailTail) { // All of the segments should have been both frozen for enqueues and preserved for observation. // Validate that here for head and tail; we'll validate it for intermediate segments later. Debug.Assert(head._preservedForObservation); Debug.Assert(head._frozenForEnqueues); Debug.Assert(tail._preservedForObservation); Debug.Assert(tail._frozenForEnqueues); long count = 0; // Head segment. We've already marked it as frozen for enqueues, so its tail position is fixed, // and we've already marked it as preserved for observation (before we grabbed the head), so we // can safely enumerate from its head to its tail and access its elements. int headTail = (head == tail ? tailTail : Volatile.Read(ref head._headAndTail.Tail)) - head.FreezeOffset; if (headHead < headTail) { // Mask the head and tail for the head segment headHead &= head._slotsMask; headTail &= head._slotsMask; // Increase the count by either the one or two regions, based on whether tail // has wrapped to be less than head. count += headHead < headTail ? headTail - headHead : head._slots.Length - headHead + headTail; } // We've enumerated the head. If the tail is different from the head, we need to // enumerate the remaining segments. 
if (head != tail) { // Count the contents of each segment between head and tail, not including head and tail. // Since there were segments before these, for our purposes we consider them to start at // the 0th element, and since there is at least one segment after each, each was frozen // by the time we snapped it, so we can iterate until each's frozen tail. for (ConcurrentQueueSegment<T> s = head._nextSegment!; s != tail; s = s._nextSegment!) { Debug.Assert(s._preservedForObservation); Debug.Assert(s._frozenForEnqueues); count += s._headAndTail.Tail - s.FreezeOffset; } // Finally, enumerate the tail. As with the intermediate segments, there were segments // before this in the snapped region, so we can start counting from the beginning. Unlike // the intermediate segments, we can't just go until the Tail, as that could still be changing; // instead we need to go until the tail we snapped for observation. count += tailTail - tail.FreezeOffset; } // Return the computed count. return count; } /// <summary> /// Copies the <see cref="ConcurrentQueue{T}"/> elements to an existing one-dimensional <see /// cref="Array">Array</see>, starting at the specified array index. /// </summary> /// <param name="array">The one-dimensional <see cref="Array">Array</see> that is the /// destination of the elements copied from the /// <see cref="ConcurrentQueue{T}"/>. The <see cref="Array">Array</see> must have zero-based /// indexing.</param> /// <param name="index">The zero-based index in <paramref name="array"/> at which copying /// begins.</param> /// <exception cref="ArgumentNullException"><paramref name="array"/> is a null reference (Nothing in /// Visual Basic).</exception> /// <exception cref="ArgumentOutOfRangeException"><paramref name="index"/> is less than /// zero.</exception> /// <exception cref="ArgumentException"><paramref name="index"/> is equal to or greater than the /// length of the <paramref name="array"/> /// -or- The number of elements in the source <see cref="ConcurrentQueue{T}"/> is greater than the /// available space from <paramref name="index"/> to the end of the destination <paramref /// name="array"/>. /// </exception> public void CopyTo(T[] array, int index) { if (array == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.array); } if (index < 0) { ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.index); } // Snap for enumeration SnapForObservation(out ConcurrentQueueSegment<T> head, out int headHead, out ConcurrentQueueSegment<T> tail, out int tailTail); // Get the number of items to be enumerated long count = GetCount(head, headHead, tail, tailTail); if (index > array.Length - count) { ThrowHelper.ThrowArgumentException(ExceptionResource.Arg_ArrayPlusOffTooSmall); } // Copy the items to the target array int i = index; using (IEnumerator<T> e = Enumerate(head, headHead, tail, tailTail)) { while (e.MoveNext()) { array[i++] = e.Current; } } Debug.Assert(count == i - index); } /// <summary>Returns an enumerator that iterates through the <see cref="ConcurrentQueue{T}"/>.</summary> /// <returns>An enumerator for the contents of the <see /// cref="ConcurrentQueue{T}"/>.</returns> /// <remarks> /// The enumeration represents a moment-in-time snapshot of the contents /// of the queue. It does not reflect any updates to the collection after /// <see cref="GetEnumerator"/> was called. The enumerator is safe to use /// concurrently with reads from and writes to the queue. 
/// </remarks> public IEnumerator<T> GetEnumerator() { SnapForObservation(out ConcurrentQueueSegment<T> head, out int headHead, out ConcurrentQueueSegment<T> tail, out int tailTail); return Enumerate(head, headHead, tail, tailTail); } /// <summary> /// Gets the head and tail information of the current contents of the queue. /// After this call returns, the specified region can be enumerated any number /// of times and will not change. /// </summary> private void SnapForObservation(out ConcurrentQueueSegment<T> head, out int headHead, out ConcurrentQueueSegment<T> tail, out int tailTail) { lock (_crossSegmentLock) // _head and _tail may only change while the lock is held. { // Snap the head and tail head = _head; tail = _tail; Debug.Assert(head != null); Debug.Assert(tail != null); Debug.Assert(tail._nextSegment == null); // Mark them and all segments in between as preserving, and ensure no additional items // can be added to the tail. for (ConcurrentQueueSegment<T> s = head; ; s = s._nextSegment!) { s._preservedForObservation = true; if (s == tail) break; Debug.Assert(s._frozenForEnqueues); // any non-tail should already be marked } tail.EnsureFrozenForEnqueues(); // we want to prevent the tailTail from moving // At this point, any dequeues from any segment won't overwrite the value, and // none of the existing segments can have new items enqueued. headHead = Volatile.Read(ref head._headAndTail.Head); tailTail = Volatile.Read(ref tail._headAndTail.Tail); } } /// <summary>Gets the item stored in the <paramref name="i"/>th entry in <paramref name="segment"/>.</summary> private static T GetItemWhenAvailable(ConcurrentQueueSegment<T> segment, int i) { Debug.Assert(segment._preservedForObservation); // Get the expected value for the sequence number int expectedSequenceNumberAndMask = (i + 1) & segment._slotsMask; // If the expected sequence number is not yet written, we're still waiting for // an enqueuer to finish storing it. Spin until it's there. if ((segment._slots[i].SequenceNumber & segment._slotsMask) != expectedSequenceNumberAndMask) { SpinWait spinner = default; while ((Volatile.Read(ref segment._slots[i].SequenceNumber) & segment._slotsMask) != expectedSequenceNumberAndMask) { spinner.SpinOnce(); } } // Return the value from the slot. return segment._slots[i].Item!; } private static IEnumerator<T> Enumerate(ConcurrentQueueSegment<T> head, int headHead, ConcurrentQueueSegment<T> tail, int tailTail) { Debug.Assert(head._preservedForObservation); Debug.Assert(head._frozenForEnqueues); Debug.Assert(tail._preservedForObservation); Debug.Assert(tail._frozenForEnqueues); // Head segment. We've already marked it as not accepting any more enqueues, // so its tail position is fixed, and we've already marked it as preserved for // enumeration (before we grabbed its head), so we can safely enumerate from // its head to its tail. int headTail = (head == tail ? tailTail : Volatile.Read(ref head._headAndTail.Tail)) - head.FreezeOffset; if (headHead < headTail) { headHead &= head._slotsMask; headTail &= head._slotsMask; if (headHead < headTail) { for (int i = headHead; i < headTail; i++) yield return GetItemWhenAvailable(head, i); } else { for (int i = headHead; i < head._slots.Length; i++) yield return GetItemWhenAvailable(head, i); for (int i = 0; i < headTail; i++) yield return GetItemWhenAvailable(head, i); } } // We've enumerated the head. If the tail is the same, we're done. if (head != tail) { // Each segment between head and tail, not including head and tail. 
Since there were // segments before these, for our purposes we consider it to start at the 0th element. for (ConcurrentQueueSegment<T> s = head._nextSegment!; s != tail; s = s._nextSegment!) { Debug.Assert(s._preservedForObservation, "Would have had to been preserved as a segment part of enumeration"); Debug.Assert(s._frozenForEnqueues, "Would have had to be frozen for enqueues as it's intermediate"); int sTail = s._headAndTail.Tail - s.FreezeOffset; for (int i = 0; i < sTail; i++) { yield return GetItemWhenAvailable(s, i); } } // Enumerate the tail. Since there were segments before this, we can just start at // its beginning, and iterate until the tail we already grabbed. tailTail -= tail.FreezeOffset; for (int i = 0; i < tailTail; i++) { yield return GetItemWhenAvailable(tail, i); } } } /// <summary>Adds an object to the end of the <see cref="ConcurrentQueue{T}"/>.</summary> /// <param name="item"> /// The object to add to the end of the <see cref="ConcurrentQueue{T}"/>. /// The value can be a null reference (Nothing in Visual Basic) for reference types. /// </param> public void Enqueue(T item) { // Try to enqueue to the current tail. if (!_tail.TryEnqueue(item)) { // If we're unable to, we need to take a slow path that will // try to add a new tail segment. EnqueueSlow(item); } } /// <summary>Adds to the end of the queue, adding a new segment if necessary.</summary> private void EnqueueSlow(T item) { while (true) { ConcurrentQueueSegment<T> tail = _tail; // Try to append to the existing tail. if (tail.TryEnqueue(item)) { return; } // If we were unsuccessful, take the lock so that we can compare and manipulate // the tail. Assuming another enqueuer hasn't already added a new segment, // do so, then loop around to try enqueueing again. lock (_crossSegmentLock) { if (tail == _tail) { // Make sure no one else can enqueue to this segment. tail.EnsureFrozenForEnqueues(); // We determine the new segment's length based on the old length. // In general, we double the size of the segment, to make it less likely // that we'll need to grow again. However, if the tail segment is marked // as preserved for observation, something caused us to avoid reusing this // segment, and if that happens a lot and we grow, we'll end up allocating // lots of wasted space. As such, in such situations we reset back to the // initial segment length; if these observations are happening frequently, // this will help to avoid wasted memory, and if they're not, we'll // relatively quickly grow again to a larger size. int nextSize = tail._preservedForObservation ? InitialSegmentLength : Math.Min(tail.Capacity * 2, MaxSegmentLength); var newTail = new ConcurrentQueueSegment<T>(nextSize); // Hook up the new tail. tail._nextSegment = newTail; _tail = newTail; } } } } /// <summary> /// Attempts to remove and return the object at the beginning of the <see /// cref="ConcurrentQueue{T}"/>. /// </summary> /// <param name="result"> /// When this method returns, if the operation was successful, <paramref name="result"/> contains the /// object removed. If no object was available to be removed, the value is unspecified. /// </param> /// <returns> /// true if an element was removed and returned from the beginning of the /// <see cref="ConcurrentQueue{T}"/> successfully; otherwise, false. /// </returns> public bool TryDequeue([MaybeNullWhen(false)] out T result) { // Get the current head ConcurrentQueueSegment<T> head = _head; // Try to take. If we're successful, we're done. 
if (head.TryDequeue(out result)) { return true; } // Check to see whether this segment is the last. If it is, we can consider // this to be a moment-in-time empty condition (even though between the TryDequeue // check and this check, another item could have arrived). if (head._nextSegment == null) { result = default!; return false; } return TryDequeueSlow(out result); // slow path that needs to fix up segments } /// <summary>Tries to dequeue an item, removing empty segments as needed.</summary> private bool TryDequeueSlow([MaybeNullWhen(false)] out T item) { while (true) { // Get the current head ConcurrentQueueSegment<T> head = _head; // Try to take. If we're successful, we're done. if (head.TryDequeue(out item)) { return true; } // Check to see whether this segment is the last. If it is, we can consider // this to be a moment-in-time empty condition (even though between the TryDequeue // check and this check, another item could have arrived). if (head._nextSegment == null) { item = default; return false; } // At this point we know that head.Next != null, which means // this segment has been frozen for additional enqueues. But between // the time that we ran TryDequeue and checked for a next segment, // another item could have been added. Try to dequeue one more time // to confirm that the segment is indeed empty. Debug.Assert(head._frozenForEnqueues); if (head.TryDequeue(out item)) { return true; } // This segment is frozen (nothing more can be added) and empty (nothing is in it). // Update head to point to the next segment in the list, assuming no one's beat us to it. lock (_crossSegmentLock) { if (head == _head) { _head = head._nextSegment; } } } } /// <summary> /// Attempts to return an object from the beginning of the <see cref="ConcurrentQueue{T}"/> /// without removing it. /// </summary> /// <param name="result"> /// When this method returns, <paramref name="result"/> contains an object from /// the beginning of the <see cref="Concurrent.ConcurrentQueue{T}"/> or default(T) /// if the operation failed. /// </param> /// <returns>true if an object was returned successfully; otherwise, false.</returns> /// <remarks> /// For determining whether the collection contains any items, use of the <see cref="IsEmpty"/> /// property is recommended rather than peeking. /// </remarks> public bool TryPeek([MaybeNullWhen(false)] out T result) => TryPeek(out result, resultUsed: true); /// <summary>Attempts to retrieve the value for the first element in the queue.</summary> /// <param name="result">The value of the first element, if found.</param> /// <param name="resultUsed">true if the result is needed; otherwise false if only the true/false outcome is needed.</param> /// <returns>true if an element was found; otherwise, false.</returns> private bool TryPeek([MaybeNullWhen(false)] out T result, bool resultUsed) { // Starting with the head segment, look through all of the segments // for the first one we can find that's not empty. ConcurrentQueueSegment<T> s = _head; while (true) { // Grab the next segment from this one, before we peek. // This is to be able to see whether the value has changed // during the peek operation. ConcurrentQueueSegment<T>? next = Volatile.Read(ref s._nextSegment); // Peek at the segment. If we find an element, we're done. if (s.TryPeek(out result, resultUsed)) { return true; } // The current segment was empty at the moment we checked.
if (next != null) { // If prior to the peek there was already a next segment, then // during the peek no additional items could have been enqueued // to it and we can just move on to check the next segment. Debug.Assert(next == s._nextSegment); s = next; } else if (Volatile.Read(ref s._nextSegment) == null) { // The next segment is null. Nothing more to peek at. break; } // The next segment was null before we peeked but non-null after. // That means either when we peeked the first segment had // already been frozen but the new segment not yet added, // or that the first segment was empty and between the time // that we peeked and then checked _nextSegment, so many items // were enqueued that we filled the first segment and went // into the next. Since we need to peek in order, we simply // loop around again to peek on the same segment. The next // time around on this segment we'll then either successfully // peek or we'll find that next was non-null before peeking, // and we'll traverse to that segment. } result = default; return false; } /// <summary> /// Removes all objects from the <see cref="ConcurrentQueue{T}"/>. /// </summary> public void Clear() { lock (_crossSegmentLock) { // Simply substitute a new segment for the existing head/tail, // as is done in the constructor. Operations currently in flight // may still read from or write to an existing segment that's // getting dropped, meaning that in flight operations may not be // linear with regards to this clear operation. To help mitigate // in-flight operations enqueuing onto the tail that's about to // be dropped, we first freeze it; that'll force enqueuers to take // this lock to synchronize and see the new tail. _tail.EnsureFrozenForEnqueues(); _tail = _head = new ConcurrentQueueSegment<T>(InitialSegmentLength); } } } }
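The public surface implemented above is small; a minimal usage sketch (self-contained, relying only on the Enqueue/TryDequeue/TryPeek/ToArray/IsEmpty members defined in this file, with the snapshot semantics described in the GetEnumerator remarks):

using System;
using System.Collections.Concurrent;

internal static class ConcurrentQueueUsageSketch
{
    private static void Main()
    {
        var queue = new ConcurrentQueue<int>();

        // Enqueue never fails; when the current tail segment fills, a new one is linked.
        for (int i = 0; i < 5; i++)
        {
            queue.Enqueue(i);
        }

        // TryPeek observes the head item without removing it.
        if (queue.TryPeek(out int head))
        {
            Console.WriteLine(head); // 0
        }

        // ToArray (like GetEnumerator/CopyTo) snapshots the queue: items enqueued
        // afterwards are not part of the snapshot.
        int[] snapshot = queue.ToArray();
        queue.Enqueue(100);
        Console.WriteLine(snapshot.Length); // 5

        // TryDequeue removes from the head and fails only when the queue is empty.
        while (queue.TryDequeue(out int item))
        {
            Console.WriteLine(item); // 0..4, then 100
        }

        // IsEmpty is documented above as cheaper than Count == 0 for emptiness checks.
        Console.WriteLine(queue.IsEmpty); // True
    }
}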
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
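For context on the change this record describes: Stopwatch.GetElapsedTime computes a TimeSpan from raw Stopwatch timestamps without allocating a Stopwatch instance. A hedged sketch of typical usage, assuming the .NET 7 shape of the API (a GetElapsedTime(long) overload measuring to now, and a GetElapsedTime(long, long) overload for an explicit interval):

using System;
using System.Diagnostics;
using System.Threading;

internal static class GetElapsedTimeSketch
{
    private static void Main()
    {
        // Capture a raw timestamp instead of allocating a Stopwatch.
        long start = Stopwatch.GetTimestamp();

        Thread.Sleep(100); // stand-in for the work being measured

        // Converts the timestamp delta to a TimeSpan using the Stopwatch frequency.
        TimeSpan elapsed = Stopwatch.GetElapsedTime(start);
        Console.WriteLine(elapsed.TotalMilliseconds);

        // The two-argument overload measures between two captured timestamps.
        long end = Stopwatch.GetTimestamp();
        Console.WriteLine(Stopwatch.GetElapsedTime(start, end));
    }
}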
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Methodical/cctor/xassem/xprecise2_cs_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> <RequiresProcessIsolation>true</RequiresProcessIsolation> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="xprecise2.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="testlib.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Generics/Instantiation/delegates/Delegate003.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; internal delegate T GenDelegate<T>(T p1, out T p2); internal class Foo { virtual public int Function(int i, out int j) { j = i; return i; } } internal class Test_Delegate003 { public static int Main() { int i, j; Foo inst = new Foo(); GenDelegate<int> MyDelegate = new GenDelegate<int>(inst.Function); i = MyDelegate(10, out j); if ((i != 10) || (j != 10)) { Console.WriteLine("Failed Sync Invocation"); return 1; } Console.WriteLine("Test Passes"); return 100; } }
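The test above binds GenDelegate<int> to a virtual instance method whose signature matches the T=int instantiation. A small companion sketch of the same pattern (hypothetical code, for illustration only), showing that a lambda with an explicitly typed out parameter satisfies the same kind of generic delegate:

using System;

internal delegate T GenDelegateSketch<T>(T p1, out T p2);

internal static class GenDelegateDemo
{
    private static void Main()
    {
        // Out parameters force explicit lambda parameter types.
        GenDelegateSketch<string> d = (string p1, out string p2) =>
        {
            p2 = p1;
            return p1;
        };

        string copy;
        string result = d("hello", out copy);
        Console.WriteLine(result == copy ? "Test Passes" : "Failed"); // Test Passes
    }
}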
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/mono/wasm/.gitignore
!Makefile .stamp-wasm-install-and-select* emsdk
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/Common/src/Interop/OSX/Interop.libproc.GetProcessInfoById.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; #pragma warning disable CA1823 // analyzer incorrectly flags fixed buffer length const (https://github.com/dotnet/roslyn/issues/37593) internal static partial class Interop { internal static partial class @libproc { // Constants from sys\param.h private const int MAXCOMLEN = 16; // Constants from proc_info.h private const int PROC_PIDTASKALLINFO = 2; // From proc_info.h [StructLayout(LayoutKind.Sequential)] internal unsafe struct proc_bsdinfo { internal uint pbi_flags; internal uint pbi_status; internal uint pbi_xstatus; internal uint pbi_pid; internal uint pbi_ppid; internal uint pbi_uid; internal uint pbi_gid; internal uint pbi_ruid; internal uint pbi_rgid; internal uint pbi_svuid; internal uint pbi_svgid; internal uint reserved; internal fixed byte pbi_comm[MAXCOMLEN]; internal fixed byte pbi_name[MAXCOMLEN * 2]; internal uint pbi_nfiles; internal uint pbi_pgid; internal uint pbi_pjobc; internal uint e_tdev; internal uint e_tpgid; internal int pbi_nice; internal ulong pbi_start_tvsec; internal ulong pbi_start_tvusec; } // From proc_info.h [StructLayout(LayoutKind.Sequential)] internal struct proc_taskinfo { internal ulong pti_virtual_size; internal ulong pti_resident_size; internal ulong pti_total_user; internal ulong pti_total_system; internal ulong pti_threads_user; internal ulong pti_threads_system; internal int pti_policy; internal int pti_faults; internal int pti_pageins; internal int pti_cow_faults; internal int pti_messages_sent; internal int pti_messages_received; internal int pti_syscalls_mach; internal int pti_syscalls_unix; internal int pti_csw; internal int pti_threadnum; internal int pti_numrunning; internal int pti_priority; }; // From proc_info.h [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Ansi)] internal struct proc_taskallinfo { internal proc_bsdinfo pbsd; internal proc_taskinfo ptinfo; } /// <summary> /// Gets information about a process given its PID /// </summary> /// <param name="pid">The PID of the process</param> /// <param name="flavor">Should be PROC_PIDTASKALLINFO</param> /// <param name="arg">Flavor dependent value</param> /// <param name="buffer">A pointer to a block of memory (of size proc_taskallinfo) allocated that will contain the data</param> /// <param name="bufferSize">The size of the allocated block above</param> /// <returns> /// The amount of data actually returned. If this size matches the bufferSize parameter then /// the data is valid. If the sizes do not match then the data is invalid, most likely due /// to not having enough permissions to query for the data of that specific process /// </returns> [GeneratedDllImport(Interop.Libraries.libproc, SetLastError = true)] private static unsafe partial int proc_pidinfo( int pid, int flavor, ulong arg, proc_taskallinfo* buffer, int bufferSize); /// <summary> /// Gets the process information for a given process /// </summary> /// <param name="pid">The PID (process ID) of the process</param> /// <returns> /// Returns a valid proc_taskallinfo struct for valid processes that the caller /// has permission to access; otherwise, returns null /// </returns> internal static unsafe proc_taskallinfo?
GetProcessInfoById(int pid) { // Negative PIDs are invalid if (pid < 0) { throw new ArgumentOutOfRangeException(nameof(pid)); } // Get the process information for the specified pid int size = sizeof(proc_taskallinfo); proc_taskallinfo info = default(proc_taskallinfo); int result = proc_pidinfo(pid, PROC_PIDTASKALLINFO, 0, &info, size); return (result == size ? new proc_taskallinfo?(info) : null); } } }
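GetProcessInfoById returns null when proc_pidinfo reports a byte count other than sizeof(proc_taskallinfo), which is typically a permissions failure. A hedged sketch of a caller (illustrative only; it assumes compilation into the same assembly as the internal Interop.libproc class above, and ProcInfoSketch is a hypothetical name):

using System;

internal static class ProcInfoSketch
{
    internal static void PrintCurrentProcessInfo()
    {
        // Query the current process; any valid pid the caller can access works.
        Interop.libproc.proc_taskallinfo? info =
            Interop.libproc.GetProcessInfoById(Environment.ProcessId);

        if (info.HasValue)
        {
            // pbsd and ptinfo mirror the macOS proc_info.h structs declared above.
            Console.WriteLine($"pid={info.Value.pbsd.pbi_pid} threads={info.Value.ptinfo.pti_threadnum}");
        }
        else
        {
            // A size mismatch from proc_pidinfo: most likely insufficient permissions.
            Console.WriteLine("process info unavailable");
        }
    }
}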
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Security.Cryptography.Csp/tests/PasswordDeriveBytesTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Test.Cryptography; using Xunit; namespace System.Security.Cryptography.DeriveBytesTests { public class PasswordDeriveBytesTests { // Note some tests were copied from Rfc2898DeriveBytes (and modified accordingly). private static readonly byte[] s_testSalt = new byte[] { 9, 5, 5, 5, 1, 2, 1, 2 }; private static readonly byte[] s_testSaltB = new byte[] { 0, 4, 0, 4, 1, 9, 7, 5 }; private const string TestPassword = "PasswordGoesHere"; private const string TestPasswordB = "FakePasswordsAreHard"; private const int DefaultIterationCount = 100; [Fact] public static void Ctor_NullPasswordBytes() { using (var pdb = new PasswordDeriveBytes((byte[])null, s_testSalt)) { Assert.Equal(DefaultIterationCount, pdb.IterationCount); Assert.Equal(s_testSalt, pdb.Salt); Assert.Equal("SHA1", pdb.HashName); } } [Fact] public static void Ctor_NullPasswordString() { Assert.Throws<ArgumentNullException>(() => new PasswordDeriveBytes((string)null, s_testSalt)); } [Fact] public static void Ctor_NullSalt() { using (var pdb = new PasswordDeriveBytes(TestPassword, null)) { Assert.Equal(DefaultIterationCount, pdb.IterationCount); Assert.Null(pdb.Salt); Assert.Equal("SHA1", pdb.HashName); } } [Fact] public static void Ctor_EmptySalt() { using (var pdb = new PasswordDeriveBytes(TestPassword, Array.Empty<byte>())) { Assert.Equal(DefaultIterationCount, pdb.IterationCount); Assert.Equal(Array.Empty<byte>(), pdb.Salt); Assert.Equal("SHA1", pdb.HashName); } } [Fact] public static void Ctor_DiminishedSalt() { using (var pdb = new PasswordDeriveBytes(TestPassword, new byte[7])) { Assert.Equal(DefaultIterationCount, pdb.IterationCount); Assert.Equal(7, pdb.Salt.Length); Assert.Equal("SHA1", pdb.HashName); } } [Fact] public static void Ctor_TooFewIterations() { Assert.Throws<ArgumentOutOfRangeException>(() => new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", 0)); } [Fact] public static void Ctor_NegativeIterations() { Assert.Throws<ArgumentOutOfRangeException>(() => new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", -1)); Assert.Throws<ArgumentOutOfRangeException>(() => new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", int.MinValue)); Assert.Throws<ArgumentOutOfRangeException>(() => new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", int.MinValue / 2)); } [Fact] public static void Ctor_DefaultIterations() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Equal(DefaultIterationCount, deriveBytes.IterationCount); } } [Fact] public static void Ctor_IterationsRespected() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", 1)) { Assert.Equal(1, deriveBytes.IterationCount); } } [Fact] public static void Ctor_CspParameters() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt, new CspParameters())) { } using (var deriveBytes = new PasswordDeriveBytes(string.Empty, s_testSalt, new CspParameters())) { } using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", 100, new CspParameters())) { } using (var deriveBytes = new PasswordDeriveBytes(string.Empty, s_testSalt, "SHA1", 100, new CspParameters())) { } } [Fact] public static void Ctor_CspParameters_Null() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt, null)) { } using (var deriveBytes = new PasswordDeriveBytes(string.Empty, s_testSalt, null)) { } using (var deriveBytes = new 
PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", 100, null)) { } using (var deriveBytes = new PasswordDeriveBytes(string.Empty, s_testSalt, "SHA1", 100, null)) { } } [Fact] public static void Ctor_SaltCopied() { byte[] saltIn = (byte[])s_testSalt.Clone(); using (var deriveBytes = new PasswordDeriveBytes(TestPassword, saltIn, "SHA1", DefaultIterationCount)) { byte[] saltOut = deriveBytes.Salt; Assert.NotSame(saltIn, saltOut); Assert.Equal(saltIn, saltOut); // Right now we know that at least one of the constructor and get_Salt made a copy, if it was // only get_Salt then this next part would fail. saltIn[0] = unchecked((byte)~saltIn[0]); // Have to read the property again to prove it's detached. Assert.NotEqual(saltIn, deriveBytes.Salt); } } [Fact] public static void GetSaltCopies() { byte[] first; byte[] second; using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt, "SHA1", DefaultIterationCount)) { first = deriveBytes.Salt; second = deriveBytes.Salt; } Assert.NotSame(first, second); Assert.Equal(first, second); } [Fact] public static void SetSaltAfterGetBytes_Throws() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { deriveBytes.GetBytes(1); Assert.Throws<CryptographicException>(() => deriveBytes.Salt = s_testSalt); } } [Fact] public static void SetSaltAfterGetBytes_Reset() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { deriveBytes.GetBytes(1); deriveBytes.Reset(); deriveBytes.Salt = s_testSaltB; Assert.Equal(s_testSaltB, deriveBytes.Salt); } } [Fact] public static void MinimumAcceptableInputs() { byte[] output; using (var deriveBytes = new PasswordDeriveBytes(string.Empty, new byte[8], "SHA1", 1)) { output = deriveBytes.GetBytes(1); } Assert.Equal(1, output.Length); Assert.Equal(0xF8, output[0]); } [Fact] public static void GetBytes_ZeroLength() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { AssertExtensions.Throws<ArgumentException>(null, () => deriveBytes.GetBytes(0)); } } [Fact] public static void GetBytes_NegativeLength() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Throws<OverflowException>(() => deriveBytes.GetBytes(-1)); Assert.Throws<OverflowException>(() => deriveBytes.GetBytes(int.MinValue)); Assert.Throws<OverflowException>(() => deriveBytes.GetBytes(int.MinValue / 2)); } } [Fact] public static void GetBytes_NotIdempotent() { byte[] first; byte[] second; using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { first = deriveBytes.GetBytes(32); second = deriveBytes.GetBytes(32); } Assert.NotEqual(first, second); } [Fact] public static void GetBytes_StableIfReset() { byte[] first; byte[] second; using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { first = deriveBytes.GetBytes(32); deriveBytes.Reset(); second = deriveBytes.GetBytes(32); } Assert.Equal(first, second); } [Fact] public static void GetBytes_StreamLike_ExtraBytes() { byte[] first; using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { // SHA1 default Assert.Equal("SHA1", deriveBytes.HashName); // Request double of SHA1 hash size first = deriveBytes.GetBytes(40); } byte[] second = new byte[first.Length]; // Reset using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { // Make two passes over the hash byte[] secondFirstHalf = deriveBytes.GetBytes(first.Length / 2); // Since we requested 20 bytes there are no 'extra' bytes left over to cause the "_extraCount" bug // in 
GetBytes(); that issue is tested in GetBytes_StreamLike_Bug_Compat. // Request 20 'extra' bytes in one call byte[] secondSecondHalf = deriveBytes.GetBytes(first.Length - secondFirstHalf.Length); Buffer.BlockCopy(secondFirstHalf, 0, second, 0, secondFirstHalf.Length); Buffer.BlockCopy(secondSecondHalf, 0, second, secondFirstHalf.Length, secondSecondHalf.Length); } Assert.Equal(first, second); } [Fact] public static void GetBytes_StreamLike_Bug_Compat() { byte[] first; using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Equal("SHA1", deriveBytes.HashName); // Request 20 bytes (SHA1 hash size) plus 12 extra bytes first = deriveBytes.GetBytes(32); } byte[] second = new byte[first.Length]; // Reset using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { // Ask for half now (16 bytes) byte[] firstHalf = deriveBytes.GetBytes(first.Length / 2); // Ask for the other half now (16 bytes) byte[] lastHalf = deriveBytes.GetBytes(first.Length - firstHalf.Length); // lastHalf should contain the last 4 bytes from the SHA1 hash plus 12 extra bytes // but due to the _extraCount bug it doesn't. // Merge the two buffers into the second array Buffer.BlockCopy(firstHalf, 0, second, 0, firstHalf.Length); Buffer.BlockCopy(lastHalf, 0, second, firstHalf.Length, lastHalf.Length); } // Fails due to _extraCount bug (the bug is fixed in Rfc2898DeriveBytes) Assert.NotEqual(first, second); // However, the first 16 bytes will be equal because the _extraCount bug does // not affect the first call, only the subsequent GetBytes() call. byte[] first_firstHalf = new byte[first.Length / 2]; byte[] second_firstHalf = new byte[first.Length / 2]; Buffer.BlockCopy(first, 0, first_firstHalf, 0, first_firstHalf.Length); Buffer.BlockCopy(second, 0, second_firstHalf, 0, second_firstHalf.Length); Assert.Equal(first_firstHalf, second_firstHalf); } [Fact] public static void GetBytes_Boundary() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { // Boundary case success deriveBytes.GetBytes(1000 * 20); // Boundary case failure Assert.Throws<CryptographicException>(() => deriveBytes.GetBytes(1)); } } [Fact] public static void GetBytes_KnownValues_MD5_32() { TestKnownValue_GetBytes( HashAlgorithmName.MD5, TestPassword, s_testSalt, DefaultIterationCount, ByteUtils.HexToByteArray("F8D88E9DAFC828DA2400F5144271C2F630A1C061C654FC9DE2E7900E121461B9")); } [Fact] public static void GetBytes_KnownValues_SHA256_40() { TestKnownValue_GetBytes( HashAlgorithmName.SHA256, TestPassword, s_testSalt, DefaultIterationCount, ByteUtils.HexToByteArray("3774A17468276057717A90C25B72915921D8F8C046F7868868DBB99BB4C4031CADE9E26BE77BEA39")); } [Fact] public static void GetBytes_KnownValues_SHA1_40() { TestKnownValue_GetBytes( HashAlgorithmName.SHA1, TestPassword, s_testSalt, DefaultIterationCount, ByteUtils.HexToByteArray("12F2497EC3EB78B0EA32AABFD8B9515FBC800BEEB6316A4DDF4EA62518341488A116DA3BBC26C685")); } [Fact] public static void GetBytes_KnownValues_SHA1_40_2() { TestKnownValue_GetBytes( HashAlgorithmName.SHA1, TestPassword, s_testSalt, DefaultIterationCount + 1, ByteUtils.HexToByteArray("FB6199E4D9BB017D2F3AF6964F3299971607C6B984934A9E43140631957429160C33A6630EF12E31")); } [Fact] public static void GetBytes_KnownValues_SHA1_40_3() { TestKnownValue_GetBytes( HashAlgorithmName.SHA1, TestPassword, s_testSaltB, DefaultIterationCount, ByteUtils.HexToByteArray("DCA4851AB3C9960CF387E64DE7A1B2E09616BEA6A4666AAFAC31F1670F23530E38BD4BF4D9248A08")); } [Fact] public static void 
GetBytes_KnownValues_SHA1_40_4() { TestKnownValue_GetBytes( HashAlgorithmName.SHA1, TestPasswordB, s_testSalt, DefaultIterationCount, ByteUtils.HexToByteArray("1DCA2A3405E93D9E3F7CD10653444F2FD93F5BE32C4B1BEDDF94D0D67461CBE86B5BDFEB32071E96")); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_KnownValues_TripleDes() { byte[] key = TestKnownValue_CryptDeriveKey( HashAlgorithmName.SHA1, TestPassword, "TripleDES", 192, s_testSalt, ByteUtils.HexToByteArray("97628A641949D99DCED35DB0ABCE20F21FF4DA9B46E00BCE")); // Verify key is valid using (var alg = new TripleDESCryptoServiceProvider()) { alg.Key = key; alg.IV = new byte[8]; alg.Padding = PaddingMode.None; alg.Mode = CipherMode.CBC; byte[] plainText = "79a86903608e133e020e1dc68c9835250c2f17b0ebeed91b".HexToByteArray(); byte[] cipher = alg.Encrypt(plainText); byte[] expectedCipher = "9DC863445642B88AC46B3B107CB5A0ACC1596A176962EE8F".HexToByteArray(); Assert.Equal<byte>(expectedCipher, cipher); byte[] decrypted = alg.Decrypt(cipher); byte[] expectedDecrypted = "79a86903608e133e020e1dc68c9835250c2f17b0ebeed91b".HexToByteArray(); Assert.Equal<byte>(expectedDecrypted, decrypted); } } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_KnownValues_RC2() { TestKnownValue_CryptDeriveKey( HashAlgorithmName.SHA1, TestPassword, "RC2", 128, s_testSalt, ByteUtils.HexToByteArray("B0695D8D98F5844B9650A9F68EFF105B")); TestKnownValue_CryptDeriveKey( HashAlgorithmName.SHA256, TestPassword, "RC2", 128, s_testSalt, ByteUtils.HexToByteArray("CF4A1CA60093E71D6B740DBB962B3C66")); TestKnownValue_CryptDeriveKey( HashAlgorithmName.MD5, TestPassword, "RC2", 128, s_testSalt, ByteUtils.HexToByteArray("84F4B6854CDF896A86FB493B852B6E1F")); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_KnownValues_RC2_NoSalt() { TestKnownValue_CryptDeriveKey( HashAlgorithmName.SHA1, TestPassword, "RC2", 128, null, // Salt is not used here so we should get same key value ByteUtils.HexToByteArray("B0695D8D98F5844B9650A9F68EFF105B")); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_KnownValues_DES() { TestKnownValue_CryptDeriveKey( HashAlgorithmName.SHA1, TestPassword, "DES", 64, s_testSalt, ByteUtils.HexToByteArray("B0685D8C98F4854A")); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_Invalid_KeyLength() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.ThrowsAny<CryptographicException>(() => deriveBytes.CryptDeriveKey("RC2", "SHA1", 127, s_testSalt)); Assert.ThrowsAny<CryptographicException>(() => deriveBytes.CryptDeriveKey("RC2", "SHA1", 129, s_testSalt)); } } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_Invalid_Algorithm() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Throws<CryptographicException>(() => deriveBytes.CryptDeriveKey("BADALG", "SHA1", 128, s_testSalt)); } } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_Invalid_HashAlgorithm() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Throws<CryptographicException>(() => 
deriveBytes.CryptDeriveKey("RC2", "BADALG", 128, s_testSalt)); } } [Fact] [PlatformSpecific(TestPlatforms.Windows)] // No support for CryptDeriveKey on Unix public static void CryptDeriveKey_Invalid_IV() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Throws<CryptographicException>(() => deriveBytes.CryptDeriveKey("RC2", "SHA1", 128, null)); Assert.Throws<CryptographicException>(() => deriveBytes.CryptDeriveKey("RC2", "SHA1", 128, new byte[1])); } } [Fact] [PlatformSpecific(TestPlatforms.AnyUnix)] public static void CryptDeriveKey_Throws_Unix() { using (var deriveBytes = new PasswordDeriveBytes(TestPassword, s_testSalt)) { Assert.Throws<PlatformNotSupportedException>(() => (deriveBytes.CryptDeriveKey("RC2", "SHA1", 128, null))); } } private static byte[] TestKnownValue_CryptDeriveKey(HashAlgorithmName hashName, string password, string alg, int keySize, byte[] salt, byte[] expected) { byte[] output; byte[] iv = new byte[8]; using (var deriveBytes = new PasswordDeriveBytes(password, salt)) { output = deriveBytes.CryptDeriveKey(alg, hashName.Name, keySize, iv); } Assert.Equal(expected, output); // For these tests, the returned IV is always zero Assert.Equal(new byte[8], iv); return output; } private static void TestKnownValue_GetBytes(HashAlgorithmName hashName, string password, byte[] salt, int iterationCount, byte[] expected) { byte[] output; using (var deriveBytes = new PasswordDeriveBytes(password, salt, hashName.Name, iterationCount)) { output = deriveBytes.GetBytes(expected.Length); } Assert.Equal(expected, output); } } }
-1
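The streaming behavior pinned by GetBytes_StreamLike_ExtraBytes and GetBytes_StreamLike_Bug_Compat above is easier to see against a KDF that honors the contract. Below is a minimal standalone sketch (not part of the test suite; StreamingKdfSketch is a hypothetical name) using Rfc2898DeriveBytes, where chunked GetBytes calls concatenate to the same output as one large call.

using System;
using System.Linq;
using System.Security.Cryptography;

static class StreamingKdfSketch
{
    static void Main()
    {
        byte[] salt = { 9, 5, 5, 5, 1, 2, 1, 2 };

        using (var kdf = new Rfc2898DeriveBytes("PasswordGoesHere", salt, 100, HashAlgorithmName.SHA1))
        {
            // One 40-byte request...
            byte[] oneShot = kdf.GetBytes(40);

            // ...equals two 20-byte requests after a Reset, because Rfc2898DeriveBytes
            // treats its output as a continuous stream. PasswordDeriveBytes breaks this
            // contract once a request leaves leftover ("extra") hash bytes, which is
            // exactly what GetBytes_StreamLike_Bug_Compat asserts for compatibility.
            kdf.Reset();
            byte[] split = new byte[40];
            kdf.GetBytes(20).CopyTo(split, 0);
            kdf.GetBytes(20).CopyTo(split, 20);

            Console.WriteLine(oneShot.SequenceEqual(split)); // True
        }
    }
}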
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Methodical/int64/signed/s_ldfld_mulovf_il_r.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="s_ldfld_mulovf.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Linq.Queryable/tests/JoinTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq.Expressions; using Xunit; namespace System.Linq.Tests { public class JoinTests : EnumerableBasedTests { public struct CustomerRec { public string name; public int custID; } public struct OrderRec { public int orderID; public int custID; public int total; } public struct AnagramRec { public string name; public int orderID; public int total; } public struct JoinRec { public string name; public int orderID; public int total; } [Fact] public void FirstOuterMatchesLastInnerLastOuterMatchesFirstInnerSameNumberElements() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 99022, total = 50 }, new OrderRec{ orderID = 43421, custID = 29022, total = 20 }, new OrderRec{ orderID = 95421, custID = 98022, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 95421, total = 9 }, new JoinRec{ name = "Robert", orderID = 45321, total = 50 } }; Assert.Equal(expected, outer.AsQueryable().Join(inner.AsQueryable(), e => e.custID, e => e.custID, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total })); } [Fact] public void NullComparer() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 323232, total = 9 } }; Assert.Equal(expected, outer.AsQueryable().Join(inner.AsQueryable(), e => e.name, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }, null)); } [Fact] public void CustomComparer() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 323232, total = 9 }, new JoinRec{ name = "Tim", orderID = 43455, total = 10 } }; Assert.Equal(expected, outer.AsQueryable().Join(inner.AsQueryable(), e => e.name, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }, new AnagramEqualityComparer())); } [Fact] public void OuterNull() { IQueryable<CustomerRec> outer = null; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outer", () => outer.Join(inner.AsQueryable(), e => e.name, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }, new AnagramEqualityComparer())); } [Fact] public void InnerNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; 
IQueryable<AnagramRec> inner = null; AssertExtensions.Throws<ArgumentNullException>("inner", () => outer.AsQueryable().Join(inner, e => e.name, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }, new AnagramEqualityComparer())); } [Fact] public void OuterKeySelectorNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outerKeySelector", () => outer.AsQueryable().Join(inner.AsQueryable(), null, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }, new AnagramEqualityComparer())); } [Fact] public void InnerKeySelectorNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("innerKeySelector", () => outer.AsQueryable().Join(inner.AsQueryable(), e => e.name, null, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }, new AnagramEqualityComparer())); } [Fact] public void ResultSelectorNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("resultSelector", () => outer.AsQueryable().Join(inner.AsQueryable(), e => e.name, e => e.name, (Expression<Func<CustomerRec, AnagramRec, JoinRec>>)null, new AnagramEqualityComparer())); } [Fact] public void OuterNullNoComparer() { IQueryable<CustomerRec> outer = null; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outer", () => outer.Join(inner.AsQueryable(), e => e.name, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total })); } [Fact] public void InnerNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; IQueryable<AnagramRec> inner = null; AssertExtensions.Throws<ArgumentNullException>("inner", () => outer.AsQueryable().Join(inner, e => e.name, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total })); } [Fact] public void OuterKeySelectorNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; 
AssertExtensions.Throws<ArgumentNullException>("outerKeySelector", () => outer.AsQueryable().Join(inner.AsQueryable(), null, e => e.name, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total })); } [Fact] public void InnerKeySelectorNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("innerKeySelector", () => outer.AsQueryable().Join(inner.AsQueryable(), e => e.name, null, (cr, or) => new JoinRec { name = cr.name, orderID = or.orderID, total = or.total })); } [Fact] public void ResultSelectorNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("resultSelector", () => outer.AsQueryable().Join(inner.AsQueryable(), e => e.name, e => e.name, (Expression<Func<CustomerRec, AnagramRec, JoinRec>>)null)); } [Fact] public void SelectorsReturnNull() { int?[] inner = { null, null, null }; int?[] outer = { null, null }; Assert.Empty(outer.AsQueryable().Join(inner.AsQueryable(), e => e, e => e, (x, y) => x)); } [Fact] public void Join1() { var count = (new int[] { 0, 1, 2 }).AsQueryable().Join(new int[] { 1, 2, 3 }, n1 => n1, n2 => n2, (n1, n2) => n1 + n2).Count(); Assert.Equal(2, count); } [Fact] public void Join2() { var count = (new int[] { 0, 1, 2 }).AsQueryable().Join(new int[] { 1, 2, 3 }, n1 => n1, n2 => n2, (n1, n2) => n1 + n2, EqualityComparer<int>.Default).Count(); Assert.Equal(2, count); } } }
-1
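The comparer-based overload exercised by CustomComparer above relies on AnagramEqualityComparer, which is defined elsewhere in the test tree. A simplified stand-in plus a minimal usage sketch (AnagramComparer and JoinSketch are hypothetical names) might look like the following; the point is that Queryable.Join only pairs "Tim" with "miT" because the comparer treats anagrams as equal keys.

using System;
using System.Collections.Generic;
using System.Linq;

class AnagramComparer : IEqualityComparer<string>
{
    // Two strings compare equal if they contain the same characters.
    public bool Equals(string x, string y) =>
        string.Concat(x.OrderBy(c => c)) == string.Concat(y.OrderBy(c => c));

    public int GetHashCode(string s) =>
        string.Concat(s.OrderBy(c => c)).GetHashCode();
}

static class JoinSketch
{
    static void Main()
    {
        var outer = new[] { "Tim", "Prakash" }.AsQueryable();
        var inner = new[] { "miT", "Prakash" };

        // The lambdas convert to expression trees for the Queryable overload;
        // the comparer is applied to the selected keys on both sides.
        var pairs = outer.Join(inner, o => o, i => i,
            (o, i) => $"{o} <-> {i}", new AnagramComparer());

        foreach (string p in pairs)
            Console.WriteLine(p); // Tim <-> miT, Prakash <-> Prakash
    }
}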
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/Loader/classloader/generics/Variance/Methods/Method003.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // This test tests constraints on method type parameters // for generic methods of generic variant interfaces // POSITIVE tests using System; public class C : IMinusT<int> { } public class D : IMinusT<string[]> { } public class E : IMinusT<object> { } public class A5 : Test001PlusT<int> { public void method1<M>(IMinusT<int> t) where M : IMinusT<int> { } } public class A6 : Test002PlusT<string> { public string[] method2<M>(IMinusT<string[]> t) where M : IMinusT<string[]> { return new string[10]; } } public class A7 : Test001MinusT<object> { public IMinusT<object[]> method1<M>(object t) where M : IPlusT<object> { return (IMinusT<object[]>)new E(); } } public class TestClass { static int iTestCount = 0; static int iErrorCount = 0; static int iExitCode = 101; public static void Eval(string location, bool exp) { ++iTestCount; if (!(exp)) { iErrorCount++; Console.WriteLine("Test Failed at location: {0} @ count {1} ", location, iTestCount); } } public static void LoadTypeInternal(string testType) { switch (testType) { case "Test001PlusT": { // positive test // return type: void // argument type: contravariant // method type constraint: contravariant Test001PlusT<int> test = (Test001PlusT<int>)new A5(); test.method1<IMinusT<int>>((IMinusT<int>)new C()); break; } case "Test002PlusT": { // positive test // return type: covariant // argument type: contravariant // method type constraint: contravariant Test002PlusT<string> test = (Test002PlusT<string>)new A6(); string[] st = test.method2<IMinusT<string[]>>((IMinusT<string[]>)new D()); break; } case "Test001MinusT": { // positive test // return type: covariant // argument type: contravariant // method type constraint: contravariant Test001MinusT<object> test = (Test001MinusT<object>)new A7(); IMinusT<object[]> obj = test.method1<IPlusT<object>>(new object()); break; } default: throw new Exception("Unexpected testType"); } } public static void LoadTypeWrapper(string testType) { LoadTypeInternal(testType); } public static bool LoadType(string testType, bool expected) { try { LoadTypeWrapper(testType); if (expected) { return true; } else { return false; } } catch (MissingMethodException) { if (expected) { Console.WriteLine("Unexpected Exception MissingMethodException"); return false; } else { return true; } } catch (TypeLoadException) { if (expected) { Console.WriteLine("Unexpected Exception TypeLoadException"); return false; } else { return true; } } catch (Exception E) { Console.WriteLine("Unexpected Exception {0}", E); return false; } } private static bool RunTests() { // positive Eval("Test101", LoadType("Test001PlusT", true)); Eval("Test102", LoadType("Test002PlusT", true)); Eval("Test103", LoadType("Test001MinusT", true)); if (iErrorCount > 0) { Console.WriteLine("Total test cases: " + iTestCount + " Failed test cases: " + iErrorCount); return false; } else { Console.WriteLine("Total test cases: " + iTestCount); return true; } } public static int Main(String[] args) { if (RunTests()) { iExitCode = 100; Console.WriteLine("All test cases passed"); } else { iExitCode = 101; Console.WriteLine("Test failed"); } return iExitCode; } }
-1
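The interfaces implemented above (IMinusT, IPlusT, Test001PlusT, and friends) are declared in a companion file that is not part of this source. Reconstructed from how the test uses them, a plausible shape is sketched below; "Plus" in a name marks a covariant (out) parameter and "Minus" a contravariant (in) one, and the exact declarations are an assumption, not quoted from the suite.

using System;

public interface IPlusT<out T> { }   // covariant type parameter
public interface IMinusT<in T> { }   // contravariant type parameter

// Reconstructed from class A5: both the argument type and the method type
// constraint use T through the contravariant IMinusT. An input position flips
// the variance requirement, so IMinusT<T> there demands an output-safe T,
// which the covariant (out) T satisfies; Test002PlusT and Test001MinusT
// follow the same pattern for their return, argument, and constraint types.
public interface Test001PlusT<out T>
{
    void method1<M>(IMinusT<T> t) where M : IMinusT<T>;
}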
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Regression/VS-ia64-JIT/M00/b115103/b115103.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'b115103' {} .assembly extern xunit.core {} .class Test_b115103 { .method static int32 Main() { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 8 ldc.i4 100 ldc.i4.1 ldc.i4.1 switch (sum,fact) pop br stop sum: add br stop fact: mul stop: ret } }
-1
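For readers tracing the IL above: switch pops the top of the stack (the second ldc.i4.1) and jumps to the target at that zero-based index, so control reaches fact with 100 and 1 still on the stack, and mul leaves 100 for ret, the harness's success code. A rough C# analogue of the control flow (SwitchSketch is a hypothetical name, not part of the test):

static class SwitchSketch
{
    static int Main()
    {
        int a = 100, b = 1;
        int selector = 1; // the value the IL `switch` consumes

        switch (selector)
        {
            case 0: return a + b;  // sum: would return 101
            case 1: return a * b;  // fact: returns 100, the success exit code
            default: return a;     // out-of-range index falls through (pop; br stop)
        }
    }
}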
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Security.Cryptography.X509Certificates/tests/CollectionImportTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.IO; using System.Linq; using Test.Cryptography; using Xunit; namespace System.Security.Cryptography.X509Certificates.Tests { public static class CollectionImportTests { [Fact] public static void ImportNull() { X509Certificate2Collection cc2 = new X509Certificate2Collection(); Assert.Throws<ArgumentNullException>(() => cc2.Import((byte[])null)); Assert.Throws<ArgumentNullException>(() => cc2.Import((string)null)); } [Fact] public static void ImportEmpty_Pkcs12() { using (ImportedCollection ic = Cert.Import(TestData.EmptyPfx)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(0, collection.Count); } } [Fact] public static void ImportX509DerBytes() { using (ImportedCollection ic = Cert.Import(TestData.MsCertificate)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); } } [Fact] public static void ImportX509PemBytes() { using (ImportedCollection ic = Cert.Import(TestData.MsCertificatePemBytes)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); } } [Fact] public static void ImportX509DerFile() { using (ImportedCollection ic = Cert.Import(TestFiles.MsCertificateDerFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); } } [Fact] public static void ImportX509PemFile() { using (ImportedCollection ic = Cert.Import(TestFiles.MsCertificatePemFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerBytes_Empty() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7EmptyDerBytes)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(0, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemBytes_Empty() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7EmptyPemBytes)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(0, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerFile_Empty() { using (ImportedCollection ic = Cert.Import(TestFiles.Pkcs7EmptyDerFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(0, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemFile_Empty() { using (ImportedCollection ic = Cert.Import(TestFiles.Pkcs7EmptyPemFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(0, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerBytes_Single() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7SingleDerBytes)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); Assert.Equal("D5B5BC1C458A558845BFF51CB4DFF31C", collection[0].SerialNumber); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemBytes_Single() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7SinglePemBytes)) { 
X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); Assert.Equal("D5B5BC1C458A558845BFF51CB4DFF31C", collection[0].SerialNumber); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerFile_Single() { using (ImportedCollection ic = Cert.Import(TestFiles.Pkcs7SingleDerFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); Assert.Equal("D5B5BC1C458A558845BFF51CB4DFF31C", collection[0].SerialNumber); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemFile_Single() { using (ImportedCollection ic = Cert.Import(TestFiles.Pkcs7SinglePemFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(1, collection.Count); Assert.Equal("D5B5BC1C458A558845BFF51CB4DFF31C", collection[0].SerialNumber); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerBytes_Chain() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7ChainDerBytes)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(3, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerByteSpan_Chain() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7ChainDerBytes.AsSpan())) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(3, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemBytes_Chain() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7ChainPemBytes)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(3, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemByteSpan_Chain() { using (ImportedCollection ic = Cert.Import(TestData.Pkcs7ChainPemBytes.AsSpan())) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(3, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7DerFile_Chain() { using (ImportedCollection ic = Cert.Import(TestFiles.Pkcs7ChainDerFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(3, collection.Count); } } [Fact] [SkipOnPlatform(PlatformSupport.MobileAppleCrypto, "PKCS#7 import is not available")] public static void ImportPkcs7PemFile_Chain() { using (ImportedCollection ic = Cert.Import(TestFiles.Pkcs7ChainPemFile)) { X509Certificate2Collection collection = ic.Collection; Assert.Equal(3, collection.Count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12Bytes_Single(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestData.PfxData, TestData.PfxDataPassword, keyStorageFlags)) { X509Certificate2Collection cc2 = ic.Collection; int count = cc2.Count; Assert.Equal(1, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12Bytes_Single_VerifyContents_ArrayString(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestData.PfxData, TestData.PfxDataPassword, keyStorageFlags)) { ImportPkcs12Bytes_Single_VerifyContents(ic); } } [Theory] [MemberData(nameof(StorageFlags))] public static void 
ImportPkcs12Bytes_Single_VerifyContents_SpanSpan(X509KeyStorageFlags keyStorageFlags) { ReadOnlySpan<byte> rawData = TestData.PfxData.AsSpan(); ReadOnlySpan<char> password = TestData.PfxDataPassword.AsSpan(); using (ImportedCollection ic = Cert.Import(rawData, password, keyStorageFlags)) { ImportPkcs12Bytes_Single_VerifyContents(ic); } } private static void ImportPkcs12Bytes_Single_VerifyContents(ImportedCollection ic) { using (var pfxCer = new X509Certificate2(TestData.PfxData, TestData.PfxDataPassword, Cert.EphemeralIfPossible)) { X509Certificate2Collection cc2 = ic.Collection; int count = cc2.Count; Assert.Equal(1, count); using (X509Certificate2 c = cc2[0]) { // pfxCer was loaded directly, cc2[0] was Imported, two distinct copies. Assert.NotSame(pfxCer, c); Assert.Equal(pfxCer, c); Assert.Equal(pfxCer.Thumbprint, c.Thumbprint); } } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12File_Single(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestFiles.PfxFile, TestData.PfxDataPassword, keyStorageFlags)) { X509Certificate2Collection cc2 = ic.Collection; int count = cc2.Count; Assert.Equal(1, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12File_Single_SpanPassword(X509KeyStorageFlags keyStorageFlags) { Span<char> password = stackalloc char[30]; password.Fill('Z'); TestData.PfxDataPassword.AsSpan().CopyTo(password.Slice(1)); password = password.Slice(1, TestData.PfxDataPassword.Length); using (ImportedCollection ic = Cert.Import(TestFiles.PfxFile, password, keyStorageFlags)) { X509Certificate2Collection cc2 = ic.Collection; int count = cc2.Count; Assert.Equal(1, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12Bytes_Chain(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestData.ChainPfxBytes, TestData.ChainPfxPassword, keyStorageFlags)) { X509Certificate2Collection certs = ic.Collection; int count = certs.Count; Assert.Equal(3, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12ByteSpan_Chain(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestData.ChainPfxBytes.AsSpan(), TestData.ChainPfxPassword, keyStorageFlags)) { X509Certificate2Collection certs = ic.Collection; int count = certs.Count; Assert.Equal(3, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12ByteSpan_Chain_SpanPassword(X509KeyStorageFlags keyStorageFlags) { ReadOnlySpan<byte> data = TestData.ChainPfxBytes.AsSpan(); ReadOnlySpan<char> password = TestData.ChainPfxPassword.AsSpan(); using (ImportedCollection ic = Cert.Import(data, password, keyStorageFlags)) { X509Certificate2Collection certs = ic.Collection; int count = certs.Count; Assert.Equal(3, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12File_Chain(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestFiles.ChainPfxFile, TestData.ChainPfxPassword, keyStorageFlags)) { X509Certificate2Collection certs = ic.Collection; int count = certs.Count; Assert.Equal(3, count); } } [Theory] [MemberData(nameof(StorageFlags))] public static void ImportPkcs12File_Chain_VerifyContents(X509KeyStorageFlags keyStorageFlags) { using (ImportedCollection ic = Cert.Import(TestFiles.ChainPfxFile, TestData.ChainPfxPassword, keyStorageFlags)) { X509Certificate2Collection certs = ic.Collection; int count = certs.Count; Assert.Equal(3, count); // Verify 
that the read ordering is consistent across the platforms string[] expectedSubjects = { "MS Passport Test Sub CA", "MS Passport Test Root CA", "test.local", }; string[] actualSubjects = certs. Select(cert => cert.GetNameInfo(X509NameType.SimpleName, false)). ToArray(); Assert.Equal(expectedSubjects, actualSubjects); // And verify that we have private keys when we expect them bool[] expectedHasPrivateKeys = { false, false, true, }; bool[] actualHasPrivateKeys = certs. Select(cert => cert.HasPrivateKey). ToArray(); Assert.Equal(expectedHasPrivateKeys, actualHasPrivateKeys); } } #if !NO_EPHEMERALKEYSET_AVAILABLE [Fact] public static void InvalidStorageFlags() { X509Certificate2Collection coll = new X509Certificate2Collection(); byte[] nonEmptyBytes = new byte[1]; AssertExtensions.Throws<ArgumentException>( "keyStorageFlags", () => coll.Import(nonEmptyBytes, string.Empty, (X509KeyStorageFlags)0xFF)); AssertExtensions.Throws<ArgumentException>( "keyStorageFlags", () => coll.Import(string.Empty, string.Empty, (X509KeyStorageFlags)0xFF)); // No test is performed here for the ephemeral flag failing downlevel, because the live // binary is always used by default, meaning it doesn't know EphemeralKeySet doesn't exist. } [Fact] public static void InvalidStorageFlags_PersistedEphemeral() { const X509KeyStorageFlags PersistedEphemeral = X509KeyStorageFlags.EphemeralKeySet | X509KeyStorageFlags.PersistKeySet; byte[] nonEmptyBytes = new byte[1]; X509Certificate2Collection coll = new X509Certificate2Collection(); AssertExtensions.Throws<ArgumentException>( "keyStorageFlags", () => coll.Import(nonEmptyBytes, string.Empty, PersistedEphemeral)); AssertExtensions.Throws<ArgumentException>( "keyStorageFlags", () => coll.Import(string.Empty, string.Empty, PersistedEphemeral)); } #endif public static IEnumerable<object[]> StorageFlags { get { yield return new object[] { X509KeyStorageFlags.DefaultKeySet }; #if !NO_EPHEMERALKEYSET_AVAILABLE if (!OperatingSystem.IsMacOS()) yield return new object[] { X509KeyStorageFlags.EphemeralKeySet }; #endif } } } }
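The collection import tests above drive `X509Certificate2Collection.Import` through the `Cert.Import`/`ImportedCollection` test helpers. As a point of reference, here is a minimal sketch of the public API those helpers wrap; the `chain.pfx` path and the password literal are placeholders, not values from the test data:

```csharp
using System;
using System.IO;
using System.Security.Cryptography.X509Certificates;

// Hypothetical inputs; the real tests use TestData/TestFiles members instead.
byte[] pfxBytes = File.ReadAllBytes("chain.pfx");
string password = "placeholder-password";

var collection = new X509Certificate2Collection();
collection.Import(pfxBytes, password, X509KeyStorageFlags.DefaultKeySet);

// A PKCS#12 blob may carry a whole chain; each element is a distinct certificate.
foreach (X509Certificate2 cert in collection)
{
    Console.WriteLine($"{cert.GetNameInfo(X509NameType.SimpleName, forIssuer: false)}: HasPrivateKey={cert.HasPrivateKey}");
}

// As the InvalidStorageFlags tests assert, combining EphemeralKeySet with
// PersistKeySet is rejected with an ArgumentException for "keyStorageFlags".
```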
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
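The PR metadata above introduces `Stopwatch.GetElapsedTime`, which converts `Stopwatch.GetTimestamp()` readings into a `TimeSpan` without allocating a `Stopwatch` instance. A minimal usage sketch (the `Thread.Sleep` stands in for whatever work is being timed):

```csharp
using System;
using System.Diagnostics;
using System.Threading;

long start = Stopwatch.GetTimestamp();
Thread.Sleep(100); // placeholder for the measured work
TimeSpan elapsed = Stopwatch.GetElapsedTime(start);
Console.WriteLine($"Elapsed: {elapsed.TotalMilliseconds:F1} ms");

// An overload also accepts an explicit ending timestamp:
long end = Stopwatch.GetTimestamp();
TimeSpan between = Stopwatch.GetElapsedTime(start, end);
```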
./src/libraries/System.ServiceModel.Syndication/tests/TestFeeds/AtomFeeds/generator-escaped-html.xml
<!-- Description: generator with text that appears to contain escaped HTML produces a warning Expect: ContainsHTML{element:generator,parent:feed} --> <feed xmlns="http://www.w3.org/2005/Atom"> <title>Example Feed</title> <link href="http://contoso.com/"/> <updated>2003-12-13T18:30:02Z</updated> <author> <name>Author Name</name> </author> <id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id> <generator uri="/generator">&lt;b&gt;The&lt;/b&gt; generator</generator> <entry> <title>Atom-Powered Robots Run Amok</title> <link href="http://contoso.com/2003/12/13/atom03"/> <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id> <updated>2003-12-13T18:30:02Z</updated> <summary>Some text.</summary> </entry> </feed>
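Per the file path above, this feed is a System.ServiceModel.Syndication test input. A sketch of how such a feed can be loaded, assuming the XML is saved under the path shown (the path is illustrative):

```csharp
using System;
using System.ServiceModel.Syndication;
using System.Xml;

using XmlReader reader = XmlReader.Create("TestFeeds/AtomFeeds/generator-escaped-html.xml");
SyndicationFeed feed = SyndicationFeed.Load(reader);

// The escaped markup round-trips as literal text, which is exactly the
// condition the ContainsHTML expectation in the header flags as a warning.
Console.WriteLine(feed.Generator); // prints: <b>The</b> generator
```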
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/JsonConverterOfT.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Text.Json.Serialization.Converters; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json.Serialization { /// <summary> /// Converts an object or value to or from JSON. /// </summary> /// <typeparam name="T">The <see cref="Type"/> to convert.</typeparam> public abstract partial class JsonConverter<T> : JsonConverter { /// <summary> /// When overridden, constructs a new <see cref="JsonConverter{T}"/> instance. /// </summary> protected internal JsonConverter() { IsValueType = typeof(T).IsValueType; IsInternalConverter = GetType().Assembly == typeof(JsonConverter).Assembly; if (HandleNull) { HandleNullOnRead = true; HandleNullOnWrite = true; } else { // For the HandleNull == false case, either: // 1) The default values are assigned in this type's virtual HandleNull property // or // 2) A converter overrode HandleNull and returned false so HandleNullOnRead and HandleNullOnWrite // will be their default values of false. } CanUseDirectReadOrWrite = ConverterStrategy == ConverterStrategy.Value && IsInternalConverter; RequiresReadAhead = ConverterStrategy == ConverterStrategy.Value; } /// <summary> /// Determines whether the type can be converted. /// </summary> /// <remarks> /// The default implementation is to return True when <paramref name="typeToConvert"/> equals typeof(T). /// </remarks> /// <param name="typeToConvert">The type to check for conversion support.</param> /// <returns>True if the type can be converted, False otherwise.</returns> public override bool CanConvert(Type typeToConvert) { return typeToConvert == typeof(T); } internal override ConverterStrategy ConverterStrategy => ConverterStrategy.Value; internal sealed override JsonPropertyInfo CreateJsonPropertyInfo() { return new JsonPropertyInfo<T>(); } internal override sealed JsonParameterInfo CreateJsonParameterInfo() { return new JsonParameterInfo<T>(); } internal override Type? KeyType => null; internal override Type? ElementType => null; /// <summary> /// Indicates whether <see langword="null"/> should be passed to the converter on serialization, /// and whether <see cref="JsonTokenType.Null"/> should be passed on deserialization. /// </summary> /// <remarks> /// The default value is <see langword="true"/> for converters based on value types, and <see langword="false"/> for converters based on reference types. /// </remarks> public virtual bool HandleNull { get { // HandleNull is only called by the framework once during initialization and any // subsequent calls elsewhere would just re-initialize to the same values (we don't // track a "hasInitialized" flag since that isn't necessary). // If the type doesn't support null, allow the converter a chance to modify. // These semantics are backwards compatible with 3.0. HandleNullOnRead = default(T) is not null; // The framework handles null automatically on writes. HandleNullOnWrite = false; return false; } } /// <summary> /// Whether the converter wants to be called when reading null tokens. /// </summary> internal bool HandleNullOnRead { get; private set; } /// <summary> /// Whether the converter wants to be called for null values. /// </summary> internal bool HandleNullOnWrite { get; private set; } // This non-generic API is sealed as it just forwards to the generic version. internal sealed override bool TryWriteAsObject(Utf8JsonWriter writer, object?
value, JsonSerializerOptions options, ref WriteStack state) { T valueOfT = (T)value!; return TryWrite(writer, valueOfT, options, ref state); } // Provide a default implementation for value converters. internal virtual bool OnTryWrite(Utf8JsonWriter writer, T value, JsonSerializerOptions options, ref WriteStack state) { Write(writer, value, options); return true; } // Provide a default implementation for value converters. internal virtual bool OnTryRead(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options, ref ReadStack state, out T? value) { value = Read(ref reader, typeToConvert, options); return true; } /// <summary> /// Read and convert the JSON to T. /// </summary> /// <remarks> /// A converter may throw any Exception, but should throw <cref>JsonException</cref> when the JSON is invalid. /// </remarks> /// <param name="reader">The <see cref="Utf8JsonReader"/> to read from.</param> /// <param name="typeToConvert">The <see cref="Type"/> being converted.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> /// <returns>The value that was converted.</returns> /// <remarks>Note that the value of <seealso cref="HandleNull"/> determines if the converter handles null JSON tokens.</remarks> public abstract T? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options); internal bool TryRead(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options, ref ReadStack state, out T? value) { // For perf and converter simplicity, handle null here instead of forwarding to the converter. if (reader.TokenType == JsonTokenType.Null && !HandleNullOnRead && !state.IsContinuation) { if (default(T) is not null) { ThrowHelper.ThrowJsonException_DeserializeUnableToConvertValue(TypeToConvert); } value = default; return true; } if (ConverterStrategy == ConverterStrategy.Value) { // A value converter should never be within a continuation. Debug.Assert(!state.IsContinuation); #if !DEBUG // For performance, only perform validation on internal converters on debug builds. if (IsInternalConverter) { if (state.Current.NumberHandling != null && IsInternalConverterForNumberType) { value = ReadNumberWithCustomHandling(ref reader, state.Current.NumberHandling.Value, options); } else { value = Read(ref reader, typeToConvert, options); } } else #endif { JsonTokenType originalPropertyTokenType = reader.TokenType; int originalPropertyDepth = reader.CurrentDepth; long originalPropertyBytesConsumed = reader.BytesConsumed; if (state.Current.NumberHandling != null && IsInternalConverterForNumberType) { value = ReadNumberWithCustomHandling(ref reader, state.Current.NumberHandling.Value, options); } else { value = Read(ref reader, typeToConvert, options); } VerifyRead( originalPropertyTokenType, originalPropertyDepth, originalPropertyBytesConsumed, isValueConverter: true, ref reader); } return true; } Debug.Assert(IsInternalConverter); bool isContinuation = state.IsContinuation; bool success; #if DEBUG // DEBUG: ensure push/pop operations preserve stack integrity JsonTypeInfo originalJsonTypeInfo = state.Current.JsonTypeInfo; #endif state.Push(); Debug.Assert(TypeToConvert == state.Current.JsonTypeInfo.Type); #if DEBUG // For performance, only perform validation on internal converters on debug builds. 
if (!isContinuation) { Debug.Assert(state.Current.OriginalTokenType == JsonTokenType.None); state.Current.OriginalTokenType = reader.TokenType; Debug.Assert(state.Current.OriginalDepth == 0); state.Current.OriginalDepth = reader.CurrentDepth; } #endif success = OnTryRead(ref reader, typeToConvert, options, ref state, out value); #if DEBUG if (success) { if (state.IsContinuation) { // The resumable converter did not forward to the next converter that previously returned false. ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } VerifyRead( state.Current.OriginalTokenType, state.Current.OriginalDepth, bytesConsumed: 0, isValueConverter: false, ref reader); // No need to clear state.Current.* since a stack pop will occur. } #endif state.Pop(success); #if DEBUG Debug.Assert(ReferenceEquals(originalJsonTypeInfo, state.Current.JsonTypeInfo)); #endif return success; } internal override sealed bool TryReadAsObject(ref Utf8JsonReader reader, JsonSerializerOptions options, ref ReadStack state, out object? value) { bool success = TryRead(ref reader, TypeToConvert, options, ref state, out T? typedValue); value = typedValue; return success; } /// <summary> /// Performance optimization. /// The 'in' modifier in 'TryWrite(in T Value)' causes boxing for Nullable{T}, so this helper avoids that. /// TODO: Remove this work-around once https://github.com/dotnet/runtime/issues/50915 is addressed. /// </summary> private static bool IsNull(T value) => value is null; internal bool TryWrite(Utf8JsonWriter writer, in T value, JsonSerializerOptions options, ref WriteStack state) { if (writer.CurrentDepth >= options.EffectiveMaxDepth) { ThrowHelper.ThrowJsonException_SerializerCycleDetected(options.EffectiveMaxDepth); } if (default(T) is null && !HandleNullOnWrite && IsNull(value)) { // We do not pass null values to converters unless HandleNullOnWrite is true. Null values for properties were // already handled in GetMemberAndWriteJson() so we don't need to check for IgnoreNullValues here. writer.WriteNullValue(); return true; } if (ConverterStrategy == ConverterStrategy.Value) { Debug.Assert(!state.IsContinuation); int originalPropertyDepth = writer.CurrentDepth; if (state.Current.NumberHandling != null && IsInternalConverterForNumberType) { WriteNumberWithCustomHandling(writer, value, state.Current.NumberHandling.Value); } else { Write(writer, value, options); } VerifyWrite(originalPropertyDepth, writer); return true; } Debug.Assert(IsInternalConverter); bool isContinuation = state.IsContinuation; bool success; if ( #if NETCOREAPP // Short-circuit the check against "is not null"; treated as a constant by recent versions of the JIT. !typeof(T).IsValueType && #else !IsValueType && #endif value is not null && // Do not handle objects that have already been // handled by a polymorphic converter for a base type. state.Current.PolymorphicSerializationState != PolymorphicSerializationState.PolymorphicReEntryStarted) { JsonConverter? polymorphicConverter = CanBePolymorphic ? 
state.Current.ResolvePolymorphicConverter(value, TypeToConvert, options) : null; Debug.Assert(polymorphicConverter is null || state.CurrentDepth > 0, "root-level polymorphic converters should not be handled here."); if (!isContinuation) { switch (options.ReferenceHandlingStrategy) { case ReferenceHandlingStrategy.IgnoreCycles: ReferenceResolver resolver = state.ReferenceResolver; if (resolver.ContainsReferenceForCycleDetection(value)) { writer.WriteNullValue(); return true; } resolver.PushReferenceForCycleDetection(value); // WriteStack reuses root-level stackframes for its children as a performance optimization; // we want to avoid writing any data for the root-level object to avoid corrupting the stack. // This is fine since popping the root object at the end of serialization is not essential. state.Current.IsPushedReferenceForCycleDetection = state.CurrentDepth > 0; break; case ReferenceHandlingStrategy.Preserve: bool canHaveIdMetadata = polymorphicConverter?.CanHaveIdMetadata ?? CanHaveIdMetadata; if (canHaveIdMetadata && JsonSerializer.TryGetReferenceForValue(value, ref state, writer)) { // We found a repeating reference and wrote the relevant metadata; serialization complete. return true; } break; default: Debug.Assert(options.ReferenceHandlingStrategy == ReferenceHandlingStrategy.None); break; } } if (polymorphicConverter is not null) { Debug.Assert(!polymorphicConverter.CanBePolymorphic, "Only ObjectConverter supports polymorphism."); state.Current.EnterPolymorphicConverter(); success = polymorphicConverter.TryWriteAsObject(writer, value, options, ref state); state.Current.ExitPolymorphicConverter(success); if (success) { if (state.Current.IsPushedReferenceForCycleDetection) { state.ReferenceResolver.PopReferenceForCycleDetection(); state.Current.IsPushedReferenceForCycleDetection = false; } } return success; } } #if DEBUG // DEBUG: ensure push/pop operations preserve stack integrity JsonTypeInfo originalJsonTypeInfo = state.Current.JsonTypeInfo; #endif state.Push(); Debug.Assert(TypeToConvert == state.Current.JsonTypeInfo.Type); #if DEBUG // For performance, only perform validation on internal converters on debug builds. if (!isContinuation) { Debug.Assert(state.Current.OriginalDepth == 0); state.Current.OriginalDepth = writer.CurrentDepth; } #endif success = OnTryWrite(writer, value, options, ref state); #if DEBUG if (success) { VerifyWrite(state.Current.OriginalDepth, writer); } #endif state.Pop(success); if (success && state.Current.IsPushedReferenceForCycleDetection) { state.ReferenceResolver.PopReferenceForCycleDetection(); state.Current.IsPushedReferenceForCycleDetection = false; } #if DEBUG Debug.Assert(ReferenceEquals(originalJsonTypeInfo, state.Current.JsonTypeInfo)); #endif return success; } internal bool TryWriteDataExtensionProperty(Utf8JsonWriter writer, T value, JsonSerializerOptions options, ref WriteStack state) { Debug.Assert(value != null); if (!IsInternalConverter) { return TryWrite(writer, value, options, ref state); } JsonDictionaryConverter<T>? dictionaryConverter = this as JsonDictionaryConverter<T> ?? (this as JsonMetadataServicesConverter<T>)?.Converter as JsonDictionaryConverter<T>; if (dictionaryConverter == null) { // If not JsonDictionaryConverter<T> then we are JsonObject. // Avoid a type reference to JsonObject and its converter to support trimming.
Debug.Assert(TypeToConvert == typeof(Nodes.JsonObject)); return TryWrite(writer, value, options, ref state); } if (writer.CurrentDepth >= options.EffectiveMaxDepth) { ThrowHelper.ThrowJsonException_SerializerCycleDetected(options.EffectiveMaxDepth); } bool isContinuation = state.IsContinuation; bool success; state.Push(); if (!isContinuation) { Debug.Assert(state.Current.OriginalDepth == 0); state.Current.OriginalDepth = writer.CurrentDepth; } // Extension data properties change how dictionary key naming policies are applied. state.Current.IsWritingExtensionDataProperty = true; state.Current.JsonPropertyInfo = state.Current.JsonTypeInfo.ElementTypeInfo!.PropertyInfoForTypeInfo; success = dictionaryConverter.OnWriteResume(writer, value, options, ref state); if (success) { VerifyWrite(state.Current.OriginalDepth, writer); } state.Pop(success); return success; } internal sealed override Type TypeToConvert => typeof(T); internal void VerifyRead(JsonTokenType tokenType, int depth, long bytesConsumed, bool isValueConverter, ref Utf8JsonReader reader) { Debug.Assert(isValueConverter == (ConverterStrategy == ConverterStrategy.Value)); switch (tokenType) { case JsonTokenType.StartArray: if (reader.TokenType != JsonTokenType.EndArray) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } else if (depth != reader.CurrentDepth) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } break; case JsonTokenType.StartObject: if (reader.TokenType != JsonTokenType.EndObject) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } else if (depth != reader.CurrentDepth) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } break; default: if (isValueConverter) { // A value converter should not make any reads. if (reader.BytesConsumed != bytesConsumed) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } } else { // A non-value converter (object or collection) should always have Start and End tokens // unless it is polymorphic or supports null value reads. if (!CanBePolymorphic && !(HandleNullOnRead && tokenType == JsonTokenType.Null)) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } } // Should not be possible to change token type. Debug.Assert(reader.TokenType == tokenType); break; } } internal void VerifyWrite(int originalDepth, Utf8JsonWriter writer) { if (originalDepth != writer.CurrentDepth) { ThrowHelper.ThrowJsonException_SerializationConverterWrite(this); } } /// <summary> /// Write the value as JSON. /// </summary> /// <remarks> /// A converter may throw any Exception, but should throw <cref>JsonException</cref> when the JSON /// cannot be created. /// </remarks> /// <param name="writer">The <see cref="Utf8JsonWriter"/> to write to.</param> /// <param name="value">The value to convert. Note that the value of <seealso cref="HandleNull"/> determines if the converter handles <see langword="null" /> values.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> public abstract void Write( Utf8JsonWriter writer, #nullable disable // T may or may not be nullable depending on the derived type's overload. T value, #nullable restore JsonSerializerOptions options); /// <summary> /// Reads a dictionary key from a JSON property name. 
/// </summary> /// <param name="reader">The <see cref="Utf8JsonReader"/> to read from.</param> /// <param name="typeToConvert">The <see cref="Type"/> being converted.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> /// <returns>The value that was converted.</returns> /// <remarks>Method should be overridden in custom converters of types used in deserialized dictionary keys.</remarks> public virtual T ReadAsPropertyName(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if (!IsInternalConverter && options.TryGetDefaultSimpleConverter(TypeToConvert, out JsonConverter? defaultConverter)) { // .NET 5 backward compatibility: hardcode the default converter for primitive key serialization. Debug.Assert(defaultConverter.IsInternalConverter && defaultConverter is JsonConverter<T>); return ((JsonConverter<T>)defaultConverter).ReadAsPropertyNameCore(ref reader, TypeToConvert, options); } ThrowHelper.ThrowNotSupportedException_DictionaryKeyTypeNotSupported(TypeToConvert, this); return default; } internal virtual T ReadAsPropertyNameCore(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { Debug.Assert(reader.TokenType == JsonTokenType.PropertyName); long originalBytesConsumed = reader.BytesConsumed; T result = ReadAsPropertyName(ref reader, typeToConvert, options); if (reader.BytesConsumed != originalBytesConsumed) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } return result; } /// <summary> /// Writes a dictionary key as a JSON property name. /// </summary> /// <param name="writer">The <see cref="Utf8JsonWriter"/> to write to.</param> /// <param name="value">The value to convert. Note that the value of <seealso cref="HandleNull"/> determines if the converter handles <see langword="null" /> values.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> /// <remarks>Method should be overridden in custom converters of types used in serialized dictionary keys.</remarks> public virtual void WriteAsPropertyName(Utf8JsonWriter writer, T value, JsonSerializerOptions options) { if (!IsInternalConverter && options.TryGetDefaultSimpleConverter(TypeToConvert, out JsonConverter? defaultConverter)) { // .NET 5 backward compatibility: hardcode the default converter for primitive key serialization. Debug.Assert(defaultConverter.IsInternalConverter && defaultConverter is JsonConverter<T>); ((JsonConverter<T>)defaultConverter).WriteAsPropertyNameCore(writer, value, options, isWritingExtensionDataProperty: false); return; } ThrowHelper.ThrowNotSupportedException_DictionaryKeyTypeNotSupported(TypeToConvert, this); } internal virtual void WriteAsPropertyNameCore(Utf8JsonWriter writer, T value, JsonSerializerOptions options, bool isWritingExtensionDataProperty) { if (isWritingExtensionDataProperty) { // Extension data is meant as a mechanism to gather unused JSON properties; // do not apply any custom key conversions and hardcode the default behavior.
Debug.Assert(!IsInternalConverter && TypeToConvert == typeof(string)); writer.WritePropertyName((string)(object)value!); return; } int originalDepth = writer.CurrentDepth; WriteAsPropertyName(writer, value, options); if (originalDepth != writer.CurrentDepth || writer.TokenType != JsonTokenType.PropertyName) { ThrowHelper.ThrowJsonException_SerializationConverterWrite(this); } } internal sealed override void WriteAsPropertyNameCoreAsObject(Utf8JsonWriter writer, object value, JsonSerializerOptions options, bool isWritingExtensionDataProperty) => WriteAsPropertyNameCore(writer, (T)value, options, isWritingExtensionDataProperty); internal virtual T ReadNumberWithCustomHandling(ref Utf8JsonReader reader, JsonNumberHandling handling, JsonSerializerOptions options) => throw new InvalidOperationException(); internal virtual void WriteNumberWithCustomHandling(Utf8JsonWriter writer, T value, JsonNumberHandling handling) => throw new InvalidOperationException(); } }
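The abstract `Read`/`Write` members above are the extension points custom converters override. A minimal sketch of a derived converter, using a made-up `Point` type purely for illustration:

```csharp
using System;
using System.Text.Json;
using System.Text.Json.Serialization;

public readonly record struct Point(int X, int Y);

// Serializes Point as a compact "X,Y" JSON string.
public sealed class PointConverter : JsonConverter<Point>
{
    public override Point Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options)
    {
        // Per the doc comments above, converters should throw JsonException on invalid JSON.
        string[] parts = reader.GetString()!.Split(',');
        if (parts.Length != 2)
        {
            throw new JsonException("Expected a string in \"X,Y\" format.");
        }

        return new Point(int.Parse(parts[0]), int.Parse(parts[1]));
    }

    public override void Write(Utf8JsonWriter writer, Point value, JsonSerializerOptions options)
        => writer.WriteStringValue($"{value.X},{value.Y}");
}

// Usage:
// var options = new JsonSerializerOptions { Converters = { new PointConverter() } };
// string json = JsonSerializer.Serialize(new Point(1, 2), options);   // "\"1,2\""
// Point p = JsonSerializer.Deserialize<Point>(json, options);
```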
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Text.Json.Serialization.Converters; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json.Serialization { /// <summary> /// Converts an object or value to or from JSON. /// </summary> /// <typeparam name="T">The <see cref="Type"/> to convert.</typeparam> public abstract partial class JsonConverter<T> : JsonConverter { /// <summary> /// When overidden, constructs a new <see cref="JsonConverter{T}"/> instance. /// </summary> protected internal JsonConverter() { IsValueType = typeof(T).IsValueType; IsInternalConverter = GetType().Assembly == typeof(JsonConverter).Assembly; if (HandleNull) { HandleNullOnRead = true; HandleNullOnWrite = true; } else { // For the HandleNull == false case, either: // 1) The default values are assigned in this type's virtual HandleNull property // or // 2) A converter overrode HandleNull and returned false so HandleNullOnRead and HandleNullOnWrite // will be their default values of false. } CanUseDirectReadOrWrite = ConverterStrategy == ConverterStrategy.Value && IsInternalConverter; RequiresReadAhead = ConverterStrategy == ConverterStrategy.Value; } /// <summary> /// Determines whether the type can be converted. /// </summary> /// <remarks> /// The default implementation is to return True when <paramref name="typeToConvert"/> equals typeof(T). /// </remarks> /// <param name="typeToConvert"></param> /// <returns>True if the type can be converted, False otherwise.</returns> public override bool CanConvert(Type typeToConvert) { return typeToConvert == typeof(T); } internal override ConverterStrategy ConverterStrategy => ConverterStrategy.Value; internal sealed override JsonPropertyInfo CreateJsonPropertyInfo() { return new JsonPropertyInfo<T>(); } internal override sealed JsonParameterInfo CreateJsonParameterInfo() { return new JsonParameterInfo<T>(); } internal override Type? KeyType => null; internal override Type? ElementType => null; /// <summary> /// Indicates whether <see langword="null"/> should be passed to the converter on serialization, /// and whether <see cref="JsonTokenType.Null"/> should be passed on deserialization. /// </summary> /// <remarks> /// The default value is <see langword="true"/> for converters based on value types, and <see langword="false"/> for converters based on reference types. /// </remarks> public virtual bool HandleNull { get { // HandleNull is only called by the framework once during initialization and any // subsequent calls elsewhere would just re-initialize to the same values (we don't // track a "hasInitialized" flag since that isn't necessary). // If the type doesn't support null, allow the converter a chance to modify. // These semantics are backwards compatible with 3.0. HandleNullOnRead = default(T) is not null; // The framework handles null automatically on writes. HandleNullOnWrite = false; return false; } } /// <summary> /// Does the converter want to be called when reading null tokens. /// </summary> internal bool HandleNullOnRead { get; private set; } /// <summary> /// Does the converter want to be called for null values. /// </summary> internal bool HandleNullOnWrite { get; private set; } // This non-generic API is sealed as it just forwards to the generic version. internal sealed override bool TryWriteAsObject(Utf8JsonWriter writer, object? 
value, JsonSerializerOptions options, ref WriteStack state) { T valueOfT = (T)value!; return TryWrite(writer, valueOfT, options, ref state); } // Provide a default implementation for value converters. internal virtual bool OnTryWrite(Utf8JsonWriter writer, T value, JsonSerializerOptions options, ref WriteStack state) { Write(writer, value, options); return true; } // Provide a default implementation for value converters. internal virtual bool OnTryRead(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options, ref ReadStack state, out T? value) { value = Read(ref reader, typeToConvert, options); return true; } /// <summary> /// Read and convert the JSON to T. /// </summary> /// <remarks> /// A converter may throw any Exception, but should throw <cref>JsonException</cref> when the JSON is invalid. /// </remarks> /// <param name="reader">The <see cref="Utf8JsonReader"/> to read from.</param> /// <param name="typeToConvert">The <see cref="Type"/> being converted.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> /// <returns>The value that was converted.</returns> /// <remarks>Note that the value of <seealso cref="HandleNull"/> determines if the converter handles null JSON tokens.</remarks> public abstract T? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options); internal bool TryRead(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options, ref ReadStack state, out T? value) { // For perf and converter simplicity, handle null here instead of forwarding to the converter. if (reader.TokenType == JsonTokenType.Null && !HandleNullOnRead && !state.IsContinuation) { if (default(T) is not null) { ThrowHelper.ThrowJsonException_DeserializeUnableToConvertValue(TypeToConvert); } value = default; return true; } if (ConverterStrategy == ConverterStrategy.Value) { // A value converter should never be within a continuation. Debug.Assert(!state.IsContinuation); #if !DEBUG // For performance, only perform validation on internal converters on debug builds. if (IsInternalConverter) { if (state.Current.NumberHandling != null && IsInternalConverterForNumberType) { value = ReadNumberWithCustomHandling(ref reader, state.Current.NumberHandling.Value, options); } else { value = Read(ref reader, typeToConvert, options); } } else #endif { JsonTokenType originalPropertyTokenType = reader.TokenType; int originalPropertyDepth = reader.CurrentDepth; long originalPropertyBytesConsumed = reader.BytesConsumed; if (state.Current.NumberHandling != null && IsInternalConverterForNumberType) { value = ReadNumberWithCustomHandling(ref reader, state.Current.NumberHandling.Value, options); } else { value = Read(ref reader, typeToConvert, options); } VerifyRead( originalPropertyTokenType, originalPropertyDepth, originalPropertyBytesConsumed, isValueConverter: true, ref reader); } return true; } Debug.Assert(IsInternalConverter); bool isContinuation = state.IsContinuation; bool success; #if DEBUG // DEBUG: ensure push/pop operations preserve stack integrity JsonTypeInfo originalJsonTypeInfo = state.Current.JsonTypeInfo; #endif state.Push(); Debug.Assert(TypeToConvert == state.Current.JsonTypeInfo.Type); #if DEBUG // For performance, only perform validation on internal converters on debug builds. 
if (!isContinuation) { Debug.Assert(state.Current.OriginalTokenType == JsonTokenType.None); state.Current.OriginalTokenType = reader.TokenType; Debug.Assert(state.Current.OriginalDepth == 0); state.Current.OriginalDepth = reader.CurrentDepth; } #endif success = OnTryRead(ref reader, typeToConvert, options, ref state, out value); #if DEBUG if (success) { if (state.IsContinuation) { // The resumable converter did not forward to the next converter that previously returned false. ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } VerifyRead( state.Current.OriginalTokenType, state.Current.OriginalDepth, bytesConsumed: 0, isValueConverter: false, ref reader); // No need to clear state.Current.* since a stack pop will occur. } #endif state.Pop(success); #if DEBUG Debug.Assert(ReferenceEquals(originalJsonTypeInfo, state.Current.JsonTypeInfo)); #endif return success; } internal override sealed bool TryReadAsObject(ref Utf8JsonReader reader, JsonSerializerOptions options, ref ReadStack state, out object? value) { bool success = TryRead(ref reader, TypeToConvert, options, ref state, out T? typedValue); value = typedValue; return success; } /// <summary> /// Performance optimization. /// The 'in' modifier in 'TryWrite(in T Value)' causes boxing for Nullable{T}, so this helper avoids that. /// TODO: Remove this work-around once https://github.com/dotnet/runtime/issues/50915 is addressed. /// </summary> private static bool IsNull(T value) => value is null; internal bool TryWrite(Utf8JsonWriter writer, in T value, JsonSerializerOptions options, ref WriteStack state) { if (writer.CurrentDepth >= options.EffectiveMaxDepth) { ThrowHelper.ThrowJsonException_SerializerCycleDetected(options.EffectiveMaxDepth); } if (default(T) is null && !HandleNullOnWrite && IsNull(value)) { // We do not pass null values to converters unless HandleNullOnWrite is true. Null values for properties were // already handled in GetMemberAndWriteJson() so we don't need to check for IgnoreNullValues here. writer.WriteNullValue(); return true; } if (ConverterStrategy == ConverterStrategy.Value) { Debug.Assert(!state.IsContinuation); int originalPropertyDepth = writer.CurrentDepth; if (state.Current.NumberHandling != null && IsInternalConverterForNumberType) { WriteNumberWithCustomHandling(writer, value, state.Current.NumberHandling.Value); } else { Write(writer, value, options); } VerifyWrite(originalPropertyDepth, writer); return true; } Debug.Assert(IsInternalConverter); bool isContinuation = state.IsContinuation; bool success; if ( #if NETCOREAPP // Short-circuit the check against "is not null"; treated as a constant by recent versions of the JIT. !typeof(T).IsValueType && #else !IsValueType && #endif value is not null && // Do not handle objects that have already been // handled by a polymorphic converter for a base type. state.Current.PolymorphicSerializationState != PolymorphicSerializationState.PolymorphicReEntryStarted) { JsonConverter? polymorphicConverter = CanBePolymorphic ? 
state.Current.ResolvePolymorphicConverter(value, TypeToConvert, options) : null; Debug.Assert(polymorphicConverter is null || state.CurrentDepth > 0, "root-level polymorphic converters should not be handled here."); if (!isContinuation) { switch (options.ReferenceHandlingStrategy) { case ReferenceHandlingStrategy.IgnoreCycles: ReferenceResolver resolver = state.ReferenceResolver; if (resolver.ContainsReferenceForCycleDetection(value)) { writer.WriteNullValue(); return true; } resolver.PushReferenceForCycleDetection(value); // WriteStack reuses root-level stackframes for its children as a performance optimization; // we want to avoid writing any data for the root-level object to avoid corrupting the stack. // This is fine since popping the root object at the end of serialization is not essential. state.Current.IsPushedReferenceForCycleDetection = state.CurrentDepth > 0; break; case ReferenceHandlingStrategy.Preserve: bool canHaveIdMetata = polymorphicConverter?.CanHaveIdMetadata ?? CanHaveIdMetadata; if (canHaveIdMetata && JsonSerializer.TryGetReferenceForValue(value, ref state, writer)) { // We found a repeating reference and wrote the relevant metadata; serialization complete. return true; } break; default: Debug.Assert(options.ReferenceHandlingStrategy == ReferenceHandlingStrategy.None); break; } } if (polymorphicConverter is not null) { Debug.Assert(!polymorphicConverter.CanBePolymorphic, "Only ObjectConverter supports polymorphism."); state.Current.EnterPolymorphicConverter(); success = polymorphicConverter.TryWriteAsObject(writer, value, options, ref state); state.Current.ExitPolymorphicConverter(success); if (success) { if (state.Current.IsPushedReferenceForCycleDetection) { state.ReferenceResolver.PopReferenceForCycleDetection(); state.Current.IsPushedReferenceForCycleDetection = false; } } return success; } } #if DEBUG // DEBUG: ensure push/pop operations preserve stack integrity JsonTypeInfo originalJsonTypeInfo = state.Current.JsonTypeInfo; #endif state.Push(); Debug.Assert(TypeToConvert == state.Current.JsonTypeInfo.Type); #if DEBUG // For performance, only perform validation on internal converters on debug builds. if (!isContinuation) { Debug.Assert(state.Current.OriginalDepth == 0); state.Current.OriginalDepth = writer.CurrentDepth; } #endif success = OnTryWrite(writer, value, options, ref state); #if DEBUG if (success) { VerifyWrite(state.Current.OriginalDepth, writer); } #endif state.Pop(success); if (success && state.Current.IsPushedReferenceForCycleDetection) { state.ReferenceResolver.PopReferenceForCycleDetection(); state.Current.IsPushedReferenceForCycleDetection = false; } #if DEBUG Debug.Assert(ReferenceEquals(originalJsonTypeInfo, state.Current.JsonTypeInfo)); #endif return success; } internal bool TryWriteDataExtensionProperty(Utf8JsonWriter writer, T value, JsonSerializerOptions options, ref WriteStack state) { Debug.Assert(value != null); if (!IsInternalConverter) { return TryWrite(writer, value, options, ref state); } JsonDictionaryConverter<T>? dictionaryConverter = this as JsonDictionaryConverter<T> ?? (this as JsonMetadataServicesConverter<T>)?.Converter as JsonDictionaryConverter<T>; if (dictionaryConverter == null) { // If not JsonDictionaryConverter<T> then we are JsonObject. // Avoid a type reference to JsonObject and its converter to support trimming. 
Debug.Assert(TypeToConvert == typeof(Nodes.JsonObject)); return TryWrite(writer, value, options, ref state); } if (writer.CurrentDepth >= options.EffectiveMaxDepth) { ThrowHelper.ThrowJsonException_SerializerCycleDetected(options.EffectiveMaxDepth); } bool isContinuation = state.IsContinuation; bool success; state.Push(); if (!isContinuation) { Debug.Assert(state.Current.OriginalDepth == 0); state.Current.OriginalDepth = writer.CurrentDepth; } // Extension data properties change how dictionary key naming policies are applied. state.Current.IsWritingExtensionDataProperty = true; state.Current.JsonPropertyInfo = state.Current.JsonTypeInfo.ElementTypeInfo!.PropertyInfoForTypeInfo; success = dictionaryConverter.OnWriteResume(writer, value, options, ref state); if (success) { VerifyWrite(state.Current.OriginalDepth, writer); } state.Pop(success); return success; } internal sealed override Type TypeToConvert => typeof(T); internal void VerifyRead(JsonTokenType tokenType, int depth, long bytesConsumed, bool isValueConverter, ref Utf8JsonReader reader) { Debug.Assert(isValueConverter == (ConverterStrategy == ConverterStrategy.Value)); switch (tokenType) { case JsonTokenType.StartArray: if (reader.TokenType != JsonTokenType.EndArray) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } else if (depth != reader.CurrentDepth) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } break; case JsonTokenType.StartObject: if (reader.TokenType != JsonTokenType.EndObject) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } else if (depth != reader.CurrentDepth) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } break; default: if (isValueConverter) { // A value converter should not make any reads. if (reader.BytesConsumed != bytesConsumed) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } } else { // A non-value converter (object or collection) should always have Start and End tokens // unless it is polymorphic or supports null value reads. if (!CanBePolymorphic && !(HandleNullOnRead && tokenType == JsonTokenType.Null)) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } } // Should not be possible to change token type. Debug.Assert(reader.TokenType == tokenType); break; } } internal void VerifyWrite(int originalDepth, Utf8JsonWriter writer) { if (originalDepth != writer.CurrentDepth) { ThrowHelper.ThrowJsonException_SerializationConverterWrite(this); } } /// <summary> /// Write the value as JSON. /// </summary> /// <remarks> /// A converter may throw any Exception, but should throw <cref>JsonException</cref> when the JSON /// cannot be created. /// </remarks> /// <param name="writer">The <see cref="Utf8JsonWriter"/> to write to.</param> /// <param name="value">The value to convert. Note that the value of <seealso cref="HandleNull"/> determines if the converter handles <see langword="null" /> values.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> public abstract void Write( Utf8JsonWriter writer, #nullable disable // T may or may not be nullable depending on the derived type's overload. T value, #nullable restore JsonSerializerOptions options); /// <summary> /// Reads a dictionary key from a JSON property name. 
/// </summary> /// <param name="reader">The <see cref="Utf8JsonReader"/> to read from.</param> /// <param name="typeToConvert">The <see cref="Type"/> being converted.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> /// <returns>The value that was converted.</returns> /// <remarks>Method should be overridden in custom converters of types used in deserialized dictionary keys.</remarks> public virtual T ReadAsPropertyName(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if (!IsInternalConverter && options.TryGetDefaultSimpleConverter(TypeToConvert, out JsonConverter? defaultConverter)) { // .NET 5 backward compatibility: hardcode the default converter for primitive key serialization. Debug.Assert(defaultConverter.IsInternalConverter && defaultConverter is JsonConverter<T>); return ((JsonConverter<T>)defaultConverter).ReadAsPropertyNameCore(ref reader, TypeToConvert, options); } ThrowHelper.ThrowNotSupportedException_DictionaryKeyTypeNotSupported(TypeToConvert, this); return default; } internal virtual T ReadAsPropertyNameCore(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { Debug.Assert(reader.TokenType == JsonTokenType.PropertyName); long originalBytesConsumed = reader.BytesConsumed; T result = ReadAsPropertyName(ref reader, typeToConvert, options); if (reader.BytesConsumed != originalBytesConsumed) { ThrowHelper.ThrowJsonException_SerializationConverterRead(this); } return result; } /// <summary> /// Writes a dictionary key as a JSON property name. /// </summary> /// <param name="writer">The <see cref="Utf8JsonWriter"/> to write to.</param> /// <param name="value">The value to convert. Note that the value of <seealso cref="HandleNull"/> determines if the converter handles <see langword="null" /> values.</param> /// <param name="options">The <see cref="JsonSerializerOptions"/> being used.</param> /// <remarks>Method should be overridden in custom converters of types used in serialized dictionary keys.</remarks> public virtual void WriteAsPropertyName(Utf8JsonWriter writer, T value, JsonSerializerOptions options) { if (!IsInternalConverter && options.TryGetDefaultSimpleConverter(TypeToConvert, out JsonConverter? defaultConverter)) { // .NET 5 backward compatibility: hardcode the default converter for primitive key serialization. Debug.Assert(defaultConverter.IsInternalConverter && defaultConverter is JsonConverter<T>); ((JsonConverter<T>)defaultConverter).WriteAsPropertyNameCore(writer, value, options, isWritingExtensionDataProperty: false); return; } ThrowHelper.ThrowNotSupportedException_DictionaryKeyTypeNotSupported(TypeToConvert, this); } internal virtual void WriteAsPropertyNameCore(Utf8JsonWriter writer, T value, JsonSerializerOptions options, bool isWritingExtensionDataProperty) { if (isWritingExtensionDataProperty) { // Extension data is meant as mechanism to gather unused JSON properties; // do not apply any custom key conversions and hardcode the default behavior. 
Debug.Assert(!IsInternalConverter && TypeToConvert == typeof(string)); writer.WritePropertyName((string)(object)value!); return; } int originalDepth = writer.CurrentDepth; WriteAsPropertyName(writer, value, options); if (originalDepth != writer.CurrentDepth || writer.TokenType != JsonTokenType.PropertyName) { ThrowHelper.ThrowJsonException_SerializationConverterWrite(this); } } internal sealed override void WriteAsPropertyNameCoreAsObject(Utf8JsonWriter writer, object value, JsonSerializerOptions options, bool isWritingExtensionDataProperty) => WriteAsPropertyNameCore(writer, (T)value, options, isWritingExtensionDataProperty); internal virtual T ReadNumberWithCustomHandling(ref Utf8JsonReader reader, JsonNumberHandling handling, JsonSerializerOptions options) => throw new InvalidOperationException(); internal virtual void WriteNumberWithCustomHandling(Utf8JsonWriter writer, T value, JsonNumberHandling handling) => throw new InvalidOperationException(); } }
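The cycle-detection and reference-preservation branches in the converter above surface to users through JsonSerializerOptions.ReferenceHandler. A minimal sketch of the observable behavior; the Node type and program are hypothetical examples, not part of the source above:

using System;
using System.Text.Json;
using System.Text.Json.Serialization;

class Node
{
    public string? Name { get; set; }
    public Node? Next { get; set; }
}

class ReferenceHandlingDemo
{
    static void Main()
    {
        var node = new Node { Name = "a" };
        node.Next = node; // self-referential cycle

        // IgnoreCycles: the ContainsReferenceForCycleDetection path above
        // detects the repeat and writes null instead of recursing.
        var ignore = new JsonSerializerOptions { ReferenceHandler = ReferenceHandler.IgnoreCycles };
        Console.WriteLine(JsonSerializer.Serialize(node, ignore));   // {"Name":"a","Next":null}

        // Preserve: TryGetReferenceForValue above emits $id/$ref metadata instead.
        var preserve = new JsonSerializerOptions { ReferenceHandler = ReferenceHandler.Preserve };
        Console.WriteLine(JsonSerializer.Serialize(node, preserve)); // {"$id":"1","Name":"a","Next":{"$ref":"1"}}
    }
}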
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MaxPairwise.Vector64.Int16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MaxPairwise_Vector64_Int16() { var test = new SimpleBinaryOpTest__MaxPairwise_Vector64_Int16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__MaxPairwise_Vector64_Int16 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, Int16[] inArray2, Int16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || 
(alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int16> _fld1; public Vector64<Int16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__MaxPairwise_Vector64_Int16 testClass) { var result = AdvSimd.MaxPairwise(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__MaxPairwise_Vector64_Int16 testClass) { fixed (Vector64<Int16>* pFld1 = &_fld1) fixed (Vector64<Int16>* pFld2 = &_fld2) { var result = AdvSimd.MaxPairwise( AdvSimd.LoadVector64((Int16*)(pFld1)), AdvSimd.LoadVector64((Int16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Int16[] _data2 = new Int16[Op2ElementCount]; private static Vector64<Int16> _clsVar1; private static Vector64<Int16> _clsVar2; private Vector64<Int16> _fld1; private Vector64<Int16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__MaxPairwise_Vector64_Int16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, 
byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); } public SimpleBinaryOpTest__MaxPairwise_Vector64_Int16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data1, _data2, new Int16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MaxPairwise( Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MaxPairwise( AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MaxPairwise), new Type[] { typeof(Vector64<Int16>), typeof(Vector64<Int16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MaxPairwise), new Type[] { typeof(Vector64<Int16>), typeof(Vector64<Int16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MaxPairwise( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed 
(Vector64<Int16>* pClsVar1 = &_clsVar1) fixed (Vector64<Int16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.MaxPairwise( AdvSimd.LoadVector64((Int16*)(pClsVar1)), AdvSimd.LoadVector64((Int16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr); var result = AdvSimd.MaxPairwise(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.MaxPairwise(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__MaxPairwise_Vector64_Int16(); var result = AdvSimd.MaxPairwise(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__MaxPairwise_Vector64_Int16(); fixed (Vector64<Int16>* pFld1 = &test._fld1) fixed (Vector64<Int16>* pFld2 = &test._fld2) { var result = AdvSimd.MaxPairwise( AdvSimd.LoadVector64((Int16*)(pFld1)), AdvSimd.LoadVector64((Int16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MaxPairwise(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int16>* pFld1 = &_fld1) fixed (Vector64<Int16>* pFld2 = &_fld2) { var result = AdvSimd.MaxPairwise( AdvSimd.LoadVector64((Int16*)(pFld1)), AdvSimd.LoadVector64((Int16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MaxPairwise(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MaxPairwise( AdvSimd.LoadVector64((Int16*)(&test._fld1)), AdvSimd.LoadVector64((Int16*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int16> op1, Vector64<Int16> op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray2 = new Int16[Op2ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int16[] left, Int16[] right, Int16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MaxPairwise(left, right, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MaxPairwise)}<Int16>(Vector64<Int16>, Vector64<Int16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
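For reference, the operation that the test's Helpers.MaxPairwise oracle is presumed to model reduces adjacent pairs of the concatenated operands (Arm SMAXP semantics). A minimal scalar sketch, assuming that convention; this helper is illustrative and not part of the test above:

using System;

static class MaxPairwiseModel
{
    // Scalar model of AdvSimd.MaxPairwise for Vector64<short> (4 elements per operand):
    // the low half of the result reduces adjacent pairs of 'left', the high half of 'right'.
    public static short[] MaxPairwise(short[] left, short[] right)
    {
        int n = left.Length; // 4 for Vector64<Int16>
        var result = new short[n];
        for (int i = 0; i < n / 2; i++)
        {
            result[i] = Math.Max(left[2 * i], left[2 * i + 1]);
            result[n / 2 + i] = Math.Max(right[2 * i], right[2 * i + 1]);
        }
        return result;
    }
}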
./src/libraries/System.Reflection.MetadataLoadContext/src/System/Reflection/TypeLoading/Parameters/RoPropertyIndexParameter.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.Diagnostics;

namespace System.Reflection.TypeLoading
{
    /// <summary>
    /// Base class for all RoParameter's returned by PropertyInfo.GetParameters(). These are identical to the associated
    /// getter's ParameterInfo's except for the Member property returning a property.
    /// </summary>
    internal sealed class RoPropertyIndexParameter : RoParameter
    {
        private readonly RoParameter _backingParameter;

        internal RoPropertyIndexParameter(RoProperty member, RoParameter backingParameter)
            : base(member, backingParameter.Position)
        {
            Debug.Assert(member != null);
            Debug.Assert(backingParameter != null);

            _backingParameter = backingParameter;
        }

        public sealed override int MetadataToken => _backingParameter.MetadataToken;
        public sealed override string? Name => _backingParameter.Name;
        public sealed override Type ParameterType => _backingParameter.ParameterType;
        public sealed override ParameterAttributes Attributes => _backingParameter.Attributes;
        public sealed override IEnumerable<CustomAttributeData> CustomAttributes => _backingParameter.CustomAttributes;
        public sealed override bool HasDefaultValue => _backingParameter.HasDefaultValue;
        public sealed override object? RawDefaultValue => _backingParameter.RawDefaultValue;
        public sealed override Type[] GetOptionalCustomModifiers() => _backingParameter.GetOptionalCustomModifiers();
        public sealed override Type[] GetRequiredCustomModifiers() => _backingParameter.GetRequiredCustomModifiers();
        public sealed override string ToString() => _backingParameter.ToString();
    }
}
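A hedged sketch of where this wrapper surfaces: index parameters enumerated through a MetadataLoadContext report the property, not its getter, as their Member. "MyLib.dll" is a placeholder path, not taken from the source above:

using System;
using System.Reflection;

class IndexParameterDemo
{
    static void Main()
    {
        // Resolve the core assembly plus the inspection target.
        var resolver = new PathAssemblyResolver(new[] { typeof(object).Assembly.Location, "MyLib.dll" });
        using var context = new MetadataLoadContext(resolver);
        Assembly assembly = context.LoadFromAssemblyPath("MyLib.dll");

        foreach (Type type in assembly.GetTypes())
        foreach (PropertyInfo property in type.GetProperties())
        foreach (ParameterInfo parameter in property.GetIndexParameters())
        {
            // parameter.Member is the PropertyInfo itself, courtesy of the wrapper above.
            Console.WriteLine($"{type.Name}.{property.Name}[{parameter.Position}] : {parameter.ParameterType}");
        }
    }
}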
./src/libraries/Common/tests/System/IO/VirtualDriveHelper.Windows.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Runtime.Versioning; namespace System.IO { // Adds test helper APIs to manipulate Windows virtual drives via SUBST. [SupportedOSPlatform("windows")] public class VirtualDriveHelper : IDisposable { // Temporary Windows directory that can be mounted to a drive letter using the subst command private string? _virtualDriveTargetDir = null; // Windows drive letter that points to a mounted directory using the subst command private char _virtualDriveLetter = default; /// <summary> /// If there is a SUBST'ed drive, Dispose unmounts it to free the drive letter. /// </summary> public void Dispose() { try { if (VirtualDriveLetter != default) { DeleteVirtualDrive(VirtualDriveLetter); Directory.Delete(VirtualDriveTargetDir, recursive: true); } } catch { } // avoid exceptions on dispose } /// <summary> /// Returns the path of a folder that is to be mounted using SUBST. /// </summary> public string VirtualDriveTargetDir { get { if (_virtualDriveTargetDir == null) { // Create a folder inside the temp directory so that it can be mounted to a drive letter with subst _virtualDriveTargetDir = Path.Join(Path.GetTempPath(), Path.GetRandomFileName()); Directory.CreateDirectory(_virtualDriveTargetDir); } return _virtualDriveTargetDir; } } /// <summary> /// Returns the drive letter of a drive letter that represents a mounted folder using SUBST. /// </summary> public char VirtualDriveLetter { get { if (_virtualDriveLetter == default) { // Mount the folder to a drive letter _virtualDriveLetter = CreateVirtualDrive(VirtualDriveTargetDir); } return _virtualDriveLetter; } } ///<summary> /// On Windows, mounts a folder to an assigned virtual drive letter using the subst command. /// subst is not available in Windows Nano. /// </summary> private static char CreateVirtualDrive(string targetDir) { char driveLetter = GetNextAvailableDriveLetter(); bool success = RunProcess(CreateProcessStartInfo("cmd", "/c", SubstPath, $"{driveLetter}:", targetDir)); if (!success || !DriveInfo.GetDrives().Any(x => x.Name[0] == driveLetter)) { throw new InvalidOperationException($"Could not create virtual drive {driveLetter}: with subst"); } return driveLetter; // Finds the next unused drive letter and returns it. char GetNextAvailableDriveLetter() { List<char> existingDrives = DriveInfo.GetDrives().Select(x => x.Name[0]).ToList(); // A,B are reserved, C is usually reserved IEnumerable<int> range = Enumerable.Range('D', 'Z' - 'D'); IEnumerable<char> castRange = range.Select(x => Convert.ToChar(x)); IEnumerable<char> allDrivesLetters = castRange.Except(existingDrives); if (!allDrivesLetters.Any()) { throw new ArgumentOutOfRangeException("No drive letters available"); } return allDrivesLetters.First(); } } /// <summary> /// On Windows, unassigns the specified virtual drive letter from its mounted folder. 
/// </summary> private static void DeleteVirtualDrive(char driveLetter) { bool success = RunProcess(CreateProcessStartInfo("cmd", "/c", SubstPath, "/d", $"{driveLetter}:")); if (!success || DriveInfo.GetDrives().Any(x => x.Name[0] == driveLetter)) { throw new InvalidOperationException($"Could not delete virtual drive {driveLetter}: with subst"); } } private static ProcessStartInfo CreateProcessStartInfo(string fileName, params string[] arguments) { var info = new ProcessStartInfo { FileName = fileName, UseShellExecute = false, RedirectStandardOutput = true }; foreach (var argument in arguments) { info.ArgumentList.Add(argument); } return info; } private static bool RunProcess(ProcessStartInfo startInfo) { using var process = Process.Start(startInfo); process.WaitForExit(); return process.ExitCode == 0; } private static string SubstPath { get { string systemRoot = Environment.GetEnvironmentVariable("SystemRoot") ?? @"C:\Windows"; return Path.Join(systemRoot, "System32", "subst.exe"); } } } }
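A minimal sketch of how a test might consume the helper above; the probe file name is illustrative:

using System;
using System.IO;

class VirtualDriveUsage
{
    static void Main()
    {
        using var drives = new VirtualDriveHelper();

        // First access runs `subst <letter>: <tempdir>` and mounts the drive.
        char letter = drives.VirtualDriveLetter;
        string probe = $@"{letter}:\probe.txt";
        File.WriteAllText(probe, "hello");
        Console.WriteLine(File.ReadAllText(probe));

        // Dispose runs `subst /d` and deletes the backing temp directory.
    }
}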
./src/native/public/mono/metadata/image.h
/**
 * \file
 */

#ifndef _MONONET_METADATA_IMAGE_H_
#define _MONONET_METADATA_IMAGE_H_

#include <mono/metadata/details/image-types.h>

MONO_BEGIN_DECLS

#define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args;
#include <mono/metadata/details/image-functions.h>
#undef MONO_API_FUNCTION

mono_bool mono_has_pdb_checksum (char *raw_data, uint32_t raw_data_len);

MONO_END_DECLS

#endif
./src/tests/JIT/Methodical/tailcall/compat_i_u2_il_r.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>PdbOnly</DebugType>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="compat_i_u2.il" />
  </ItemGroup>
</Project>
./src/libraries/System.Diagnostics.FileVersionInfo/tests/System.Diagnostics.FileVersionInfo.TestAssembly/Assembly1.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Reflection;

// Comments
[assembly:AssemblyDescriptionAttribute("Have you played a Contoso amusement device today?")]
// CompanyName
[assembly:AssemblyCompanyAttribute("The name of the company.")]
// FileDescription
[assembly:AssemblyTitleAttribute("My File")]
// FileVersion
[assembly:AssemblyFileVersionAttribute("4.3.2.1")]
// ProductVersion (overrides FileVersion to be the ProductVersion)
[assembly: AssemblyInformationalVersionAttribute("1.2.3-beta.4")]
// LegalCopyright
[assembly:AssemblyCopyrightAttribute("Copyright, you betcha!")]
// LegalTrademarks
[assembly:AssemblyTrademarkAttribute("TM")]
// Product
[assembly:AssemblyProductAttribute("The greatest product EVER")]

namespace System.Diagnostics.Tests
{
    public class Test
    {
        public static void Main() { }
    }
}
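The comments above name the FileVersionInfo field each attribute feeds. A hedged sketch of reading them back; "Assembly1.dll" is a placeholder for the compiled output path:

using System;
using System.Diagnostics;

class VersionInfoDemo
{
    static void Main()
    {
        FileVersionInfo info = FileVersionInfo.GetVersionInfo("Assembly1.dll");
        Console.WriteLine(info.Comments);        // AssemblyDescription
        Console.WriteLine(info.CompanyName);     // AssemblyCompany
        Console.WriteLine(info.FileDescription); // AssemblyTitle
        Console.WriteLine(info.FileVersion);     // AssemblyFileVersion: "4.3.2.1"
        Console.WriteLine(info.ProductVersion);  // AssemblyInformationalVersion: "1.2.3-beta.4"
        Console.WriteLine(info.LegalCopyright);  // AssemblyCopyright
        Console.WriteLine(info.LegalTrademarks); // AssemblyTrademark
        Console.WriteLine(info.ProductName);     // AssemblyProduct
    }
}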
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest893/Generated893.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated893 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public G3_C1365`1<T0> extends class G2_C392`2<class BaseClass1,!T0> implements class IBase2`2<class BaseClass1,!T0> { .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G3_C1365::Method7.15786<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod4121() cil managed noinlining { ldstr "G3_C1365::ClassMethod4121.15787()" ret } .method public hidebysig newslot virtual instance string ClassMethod4122() cil managed noinlining { ldstr "G3_C1365::ClassMethod4122.15788()" ret } .method public hidebysig newslot virtual instance string ClassMethod4123<M0>() cil managed noinlining { ldstr "G3_C1365::ClassMethod4123.15789<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod4124<M0>() cil managed noinlining { ldstr "G3_C1365::ClassMethod4124.15790<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'G2_C392<class BaseClass1,T0>.ClassMethod2131'() cil managed noinlining { .override method instance string class G2_C392`2<class BaseClass1,!T0>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ret } .method public hidebysig newslot virtual instance string 'G2_C392<class BaseClass1,T0>.ClassMethod2132'() cil managed noinlining { .override method instance string class G2_C392`2<class BaseClass1,!T0>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G2_C392`2<class BaseClass1,!T0>::.ctor() ret } } .class public G2_C392`2<T0, T1> extends class G1_C7`2<!T1,class BaseClass1> implements class IBase1`1<!T0>, class IBase2`2<class BaseClass0,class BaseClass0> { .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G2_C392::Method4.8467()" ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining { 
.override method instance string class IBase1`1<!T0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G2_C392::Method5.8469()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G2_C392::Method6.8470<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C392::Method7.8471<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,class BaseClass0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<[1]>() ldstr "G2_C392::Method7.MI.8472<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod2131() cil managed noinlining { ldstr "G2_C392::ClassMethod2131.8473()" ret } .method public hidebysig newslot virtual instance string ClassMethod2132() cil managed noinlining { ldstr "G2_C392::ClassMethod2132.8474()" ret } .method public hidebysig newslot virtual instance string 'G1_C7<T1,class BaseClass1>.ClassMethod1329'() cil managed noinlining { .override method instance string class G1_C7`2<!T1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ret } .method public hidebysig newslot virtual instance string 'G1_C7<T1,class BaseClass1>.ClassMethod1330'<M0>() cil managed noinlining { .override method instance string class G1_C7`2<!T1,class BaseClass1>::ClassMethod1330<[1]>() ldstr "G2_C392::ClassMethod1330.MI.8476<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C7`2<!T1,class BaseClass1>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public abstract G1_C7`2<T0, T1> implements class IBase1`1<!T0>, class IBase2`2<class BaseClass1,!T0> { .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G1_C7::Method4.4811()" ret } .method public hidebysig newslot virtual instance string Method5() cil managed noinlining { ldstr "G1_C7::Method5.4812()" ret } .method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C7::Method6.4813<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype 
[mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method6<[1]>() ldstr "G1_C7::Method6.MI.4814<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C7::Method7.4815<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,T0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<class BaseClass1,!T0>::Method7<[1]>() ldstr "G1_C7::Method7.MI.4816<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1328() cil managed noinlining { ldstr "G1_C7::ClassMethod1328.4817()" ret } .method public hidebysig newslot virtual instance string ClassMethod1329() cil managed noinlining { ldstr "G1_C7::ClassMethod1329.4818()" ret } .method public hidebysig newslot virtual instance string ClassMethod1330<M0>() cil managed noinlining { ldstr "G1_C7::ClassMethod1330.4819<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1331<M0>() cil managed noinlining { ldstr "G1_C7::ClassMethod1331.4820<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated893 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void 
[TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1365.T<T0,(class G3_C1365`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1365.T<T0,(class G3_C1365`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4121() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4122() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4123<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4124<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1365.A<(class G3_C1365`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1365.A<(class G3_C1365`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4121() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4122() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4123<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4124<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1365.B<(class G3_C1365`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1365.B<(class G3_C1365`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4121() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4122() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4123<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4124<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.T.T<T0,T1,(class G2_C392`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.T.T<T0,T1,(class G2_C392`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.A.T<T1,(class G2_C392`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.A.T<T1,(class G2_C392`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.A.A<(class G2_C392`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.A.A<(class G2_C392`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.A.B<(class G2_C392`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.A.B<(class G2_C392`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.B.T<T1,(class G2_C392`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.B.T<T1,(class G2_C392`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.B.A<(class G2_C392`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.B.A<(class G2_C392`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.B.B<(class G2_C392`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.B.B<(class G2_C392`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.T.T<T0,T1,(class G1_C7`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.T.T<T0,T1,(class G1_C7`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.A.T<T1,(class G1_C7`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.A.T<T1,(class G1_C7`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.A.A<(class G1_C7`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.A.A<(class G1_C7`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.A.B<(class G1_C7`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.A.B<(class G1_C7`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.B.T<T1,(class G1_C7`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.B.T<T1,(class G1_C7`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.B.A<(class G1_C7`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.B.A<(class G1_C7`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.B.B<(class G1_C7`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.B.B<(class G1_C7`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1365`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop 
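// Pattern of the generated checks in this method: reload the test instance from V_0
// (ldloc.0), invoke one virtual or interface method through a particular static type
// (castclass to a base type, or callvirt through an interface instantiation), then
// pass the returned string, the expected string, and a description to
// TestFramework::MethodCallTest(string,string,string) for verification.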
ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" 
ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4124<object>() ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on 
type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4123<object>() ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4122() ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4121() ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class 
G3_C1365`1<class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1365`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class 
BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string 
class G3_C1365`1<class BaseClass1>::ClassMethod4124<object>() ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4123<object>() ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4122() ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4121() ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C392`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class 
BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop 
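// Next subject: a fresh G2_C392`2<class BaseClass0,class BaseClass1>, exercised through its
// G1_C7`2 base-class view, its IBase1`1/IBase2`2 interface views, and its own vtable.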
newobj instance void class G2_C392`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance 
string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G1_C7::Method6.MI.4814<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr 
"G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C392`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> 
callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop 
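// Interface-view checks for G2_C392`2<class BaseClass1,class BaseClass0>: IBase2`2::Method7
// resolves to the G1_C7 or G2_C392 override depending on the interface instantiation.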
ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() ldstr 
"G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C392`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class 
BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class 
BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class 
BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
// ConstrainedCallsTest re-runs the same dispatch matrix through the generic helper
// methods on Generated893 (M.G1_C7.*, M.IBase1.*, M.IBase2.*, M.G2_C392.*, M.G3_C1365.*),
// each of which receives the expected results as a '#'-separated list.
.method public hidebysig static void ConstrainedCallsTest() cil managed {
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1365`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass0,class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.A<class G3_C1365`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1365`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.B<class G3_C1365`1<class 
BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.B<class G3_C1365`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.B<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G2_C392`2<class BaseClass0,class 
BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.A<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.A<class G2_C392`2<class 
BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.8467()#G2_C392::Method5.8469()#G1_C7::Method6.MI.4814<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.8467()#G2_C392::Method5.8469()#G1_C7::Method6.MI.4814<System.Object>()#" call void Generated893::M.IBase1.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class 
G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1365`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class 
BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default 
string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class 
BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class 
IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4124<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4123<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4122() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4121() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class 
G3_C1365`1<class BaseClass0>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1365`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class 
BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class 
BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4124<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4123<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4122() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4121() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr 
"G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class 
BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr 
"G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr 
"G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call 
void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class 
G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::Method6.MI.4814<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 
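// The checks below go through G2_C392`2<class BaseClass0,class BaseClass1>'s own slots: each one
// loads the instance, fetches the virtual slot with ldvirtftn, invokes it through a raw function
// pointer via calli, and compares the returned string against the expected override.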
castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli 
default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" 
ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr 
"G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class 
BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class 
BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class 
IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) 
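// Remaining slots inherited from G1_C7 (ClassMethod1330/1329/1328) plus the
// IBase2`2<class BaseClass0,class BaseClass0> view of G2_C392`2<class BaseClass1,class BaseClass1>,
// verified through the same ldvirtftn/calli pattern.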
ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated893::MethodCallingTest() call void Generated893::ConstrainedCallsTest() call void Generated893::StructConstrainedInterfaceCallsTest() call void Generated893::CalliTest() ldc.i4 100 ret } }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }

//TYPES IN FORWARDER ASSEMBLIES:

//TEST ASSEMBLY:
.assembly Generated893 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}

.class public BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
    ldarg.0
    call instance void [mscorlib]System.Object::.ctor()
    ret
  }
}

.class public BaseClass1 extends BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
    ldarg.0
    call instance void BaseClass0::.ctor()
    ret
  }
}

.class public G3_C1365`1<T0> extends class G2_C392`2<class BaseClass1,!T0> implements class IBase2`2<class BaseClass1,!T0>
{
  .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
    ldstr "G3_C1365::Method7.15786<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod4121() cil managed noinlining {
    ldstr "G3_C1365::ClassMethod4121.15787()"
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod4122() cil managed noinlining {
    ldstr "G3_C1365::ClassMethod4122.15788()"
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod4123<M0>() cil managed noinlining {
    ldstr "G3_C1365::ClassMethod4123.15789<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod4124<M0>() cil managed noinlining {
    ldstr "G3_C1365::ClassMethod4124.15790<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string 'G2_C392<class BaseClass1,T0>.ClassMethod2131'() cil managed noinlining {
    .override method instance string class G2_C392`2<class BaseClass1,!T0>::ClassMethod2131()
    ldstr "G3_C1365::ClassMethod2131.MI.15791()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'G2_C392<class BaseClass1,T0>.ClassMethod2132'() cil managed noinlining {
    .override method instance string class G2_C392`2<class BaseClass1,!T0>::ClassMethod2132()
    ldstr "G3_C1365::ClassMethod2132.MI.15792()"
    ret
  }
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
    ldarg.0
    call instance void class G2_C392`2<class BaseClass1,!T0>::.ctor()
    ret
  }
}

.class public G2_C392`2<T0, T1> extends class G1_C7`2<!T1,class BaseClass1> implements class IBase1`1<!T0>, class IBase2`2<class BaseClass0,class BaseClass0>
{
  .method public hidebysig virtual instance string Method4() cil managed noinlining {
    ldstr "G2_C392::Method4.8467()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ret } .method public hidebysig virtual instance string Method5() cil managed noinlining { ldstr "G2_C392::Method5.8469()" ret } .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining { ldstr "G2_C392::Method6.8470<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "G2_C392::Method7.8471<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,class BaseClass0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<[1]>() ldstr "G2_C392::Method7.MI.8472<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod2131() cil managed noinlining { ldstr "G2_C392::ClassMethod2131.8473()" ret } .method public hidebysig newslot virtual instance string ClassMethod2132() cil managed noinlining { ldstr "G2_C392::ClassMethod2132.8474()" ret } .method public hidebysig newslot virtual instance string 'G1_C7<T1,class BaseClass1>.ClassMethod1329'() cil managed noinlining { .override method instance string class G1_C7`2<!T1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ret } .method public hidebysig newslot virtual instance string 'G1_C7<T1,class BaseClass1>.ClassMethod1330'<M0>() cil managed noinlining { .override method instance string class G1_C7`2<!T1,class BaseClass1>::ClassMethod1330<[1]>() ldstr "G2_C392::ClassMethod1330.MI.8476<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void class G1_C7`2<!T1,class BaseClass1>::.ctor() ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public abstract G1_C7`2<T0, T1> implements class IBase1`1<!T0>, class IBase2`2<class BaseClass1,!T0> { .method public hidebysig virtual instance string Method4() cil managed noinlining { ldstr "G1_C7::Method4.4811()" ret } .method public hidebysig newslot virtual instance string Method5() cil managed noinlining { ldstr "G1_C7::Method5.4812()" ret } .method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining { ldstr "G1_C7::Method6.4813<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype 
[mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining { .override method instance string class IBase1`1<!T0>::Method6<[1]>() ldstr "G1_C7::Method6.MI.4814<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining { ldstr "G1_C7::Method7.4815<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,T0>.Method7'<M0>() cil managed noinlining { .override method instance string class IBase2`2<class BaseClass1,!T0>::Method7<[1]>() ldstr "G1_C7::Method7.MI.4816<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1328() cil managed noinlining { ldstr "G1_C7::ClassMethod1328.4817()" ret } .method public hidebysig newslot virtual instance string ClassMethod1329() cil managed noinlining { ldstr "G1_C7::ClassMethod1329.4818()" ret } .method public hidebysig newslot virtual instance string ClassMethod1330<M0>() cil managed noinlining { ldstr "G1_C7::ClassMethod1330.4819<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot virtual instance string ClassMethod1331<M0>() cil managed noinlining { ldstr "G1_C7::ClassMethod1331.4820<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class interface public abstract IBase1`1<+T0> { .method public hidebysig newslot abstract virtual instance string Method4() cil managed { } .method public hidebysig newslot abstract virtual instance string Method5() cil managed { } .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated893 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void 
[TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1365.T<T0,(class G3_C1365`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1365.T<T0,(class G3_C1365`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4121() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4122() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4123<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::ClassMethod4124<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1365.A<(class G3_C1365`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1365.A<(class G3_C1365`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4121() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4122() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4123<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4124<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G3_C1365.B<(class G3_C1365`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 19 .locals init (string[] actualResults) ldc.i4.s 14 newarr string stloc.s actualResults ldarg.1 ldstr "M.G3_C1365.B<(class G3_C1365`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 14 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4121() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4122() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. 
!!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4123<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4124<object>() stelem.ref ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 11 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 12 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 13 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1365`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.T.T<T0,T1,(class G2_C392`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.T.T<T0,T1,(class G2_C392`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.A.T<T1,(class G2_C392`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.A.T<T1,(class G2_C392`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.A.A<(class G2_C392`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.A.A<(class G2_C392`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.A.B<(class G2_C392`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.A.B<(class G2_C392`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.B.T<T1,(class G2_C392`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.B.T<T1,(class G2_C392`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.B.A<(class G2_C392`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.B.A<(class G2_C392`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C392.B.B<(class G2_C392`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 15 .locals init (string[] actualResults) ldc.i4.s 10 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C392.B.B<(class G2_C392`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 10 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.T.T<T0,T1,(class G1_C7`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.T.T<T0,T1,(class G1_C7`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<!!T0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.A.T<T1,(class G1_C7`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.A.T<T1,(class G1_C7`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.A.A<(class G1_C7`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.A.A<(class G1_C7`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.A.B<(class G1_C7`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.A.B<(class G1_C7`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.B.T<T1,(class G1_C7`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.B.T<T1,(class G1_C7`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.B.A<(class G1_C7`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.B.A<(class G1_C7`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C7.B.B<(class G1_C7`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 13 .locals init (string[] actualResults) ldc.i4.s 8 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C7.B.B<(class G1_C7`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 8 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1365`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop 
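// Added comment (not emitted by the generator): the expected strings encode which slot each
// static view resolves to. Seen through its G1_C7`2 base, the G3_C1365`1<class BaseClass0>
// instance still reports "G2_C392::Method5.8469()" for Method5, because G2_C392 declares
// Method5 without 'newslot' and therefore overrides the G1_C7 virtual, whereas ClassMethod1328
// keeps "G1_C7::ClassMethod1328.4817()" -- no type further down the hierarchy re-declares it.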
ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" 
ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4124<object>() ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on 
type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4123<object>() ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4122() ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod4121() ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class 
G3_C1365`1<class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass0> callvirt instance string class G3_C1365`1<class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop
// Scenario: G3_C1365`1<BaseClass1>. Construct it and verify every inherited slot through each base-class, interface, and exact-type view.
newobj instance void class G3_C1365`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class
BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string 
class G3_C1365`1<class BaseClass1>::ClassMethod4124<object>() ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4123<object>() ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4122() ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod4121() ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method7<object>() ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2132() ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod2131() ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1365`1<class BaseClass1> callvirt instance string class G3_C1365`1<class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C392`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class 
BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop 
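// Scenario: G2_C392`2<BaseClass0,BaseClass1>. Construct the instance and verify each inherited slot through its base-class, interface, and exact-type views.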
newobj instance void class G2_C392`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance 
string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G1_C7::Method6.MI.4814<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr 
"G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass0,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C392`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> 
callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass0,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop 
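// IBase2`2 views of G2_C392`2<BaseClass1,BaseClass0>: Method7 resolves to G1_C7::Method7.MI.4816 for IBase2`2<BaseClass1,*> and to G2_C392::Method7.MI.8472 for IBase2`2<BaseClass0,*>.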
ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() ldstr 
"G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass0> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G2_C392`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class 
BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C7`2<class BaseClass1,class BaseClass1> callvirt instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class 
BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class 
BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C392`2<class BaseClass1,class BaseClass1> callvirt instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1365`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass0,class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr 
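// Each ldstr in ConstrainedCallsTest supplies the '#'-separated list of implementations
// expected for the type's virtual and interface slots, checked via the Generated893::M.*
// generic helpers.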
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.A<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G3_C1365`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.T<class BaseClass0,class G3_C1365`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.A<class G3_C1365`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1365`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.B.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.IBase2.A.B<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G2_C392.B.B<class G3_C1365`1<class 
BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1365`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G3_C1365`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.T<class BaseClass1,class G3_C1365`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G3_C1365::ClassMethod2131.MI.15791()#G3_C1365::ClassMethod2132.MI.15792()#G3_C1365::ClassMethod4121.15787()#G3_C1365::ClassMethod4122.15788()#G3_C1365::ClassMethod4123.15789<System.Object>()#G3_C1365::ClassMethod4124.15790<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G3_C1365::Method7.15786<System.Object>()#" call void Generated893::M.G3_C1365.B<class G3_C1365`1<class BaseClass1>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.B<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G2_C392`2<class BaseClass0,class 
BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.A<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass0,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.A<class G2_C392`2<class 
BaseClass0,class BaseClass0>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.8467()#G2_C392::Method5.8469()#G1_C7::Method6.MI.4814<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.8467()#G2_C392::Method5.8469()#G1_C7::Method6.MI.4814<System.Object>()#" call void Generated893::M.IBase1.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr 
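// G2_C392`2<BaseClass0,BaseClass1> (newobj above) is the second G2_C392 instantiation tested
// directly, without the G3_C1365 overrides in the picture; Method7 now resolves to the
// G2_C392/G1_C7 implementations recorded in the strings.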
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.T<class BaseClass1,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.A.B<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass0,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass0,class BaseClass1>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.A.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr 
"G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.A<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G2_C392`2<class BaseClass1,class BaseClass0>>(!!0,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G1_C7::Method7.4815<System.Object>()#" call void Generated893::M.G1_C7.B.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C7::Method7.MI.4816<System.Object>()#" call void Generated893::M.IBase2.B.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C392::Method4.MI.8468()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#" call void Generated893::M.IBase1.A<class 
G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.T.T<class BaseClass1,class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.T<class BaseClass1,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0 ldstr "G1_C7::ClassMethod1328.4817()#G2_C392::ClassMethod1329.MI.8475()#G2_C392::ClassMethod1330.MI.8476<System.Object>()#G1_C7::ClassMethod1331.4820<System.Object>()#G2_C392::ClassMethod2131.8473()#G2_C392::ClassMethod2132.8474()#G2_C392::Method4.8467()#G2_C392::Method5.8469()#G2_C392::Method6.8470<System.Object>()#G2_C392::Method7.8471<System.Object>()#" call void Generated893::M.G2_C392.B.B<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.T<class BaseClass0,class G2_C392`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0 ldstr "G2_C392::Method7.MI.8472<System.Object>()#" call void Generated893::M.IBase2.A.A<class G2_C392`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed {
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string)
ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed {
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1365`1<class BaseClass0>::.ctor() stloc.0
ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class
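// CalliTest resolves each virtual slot explicitly with ldvirtftn and invokes the returned
// pointer via calli, verifying that indirect calls agree with regular callvirt dispatch.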
BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default 
string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class 
BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class 
IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4124<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4123<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4122() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod4121() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class 
G3_C1365`1<class BaseClass0>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::Method4() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G3_C1365`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass0>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass0> on type class G3_C1365`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G3_C1365`1<class BaseClass1>::.ctor() stloc.0
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void
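// The ldvirtftn/calli checks continue here for the G3_C1365`1<BaseClass1> instance created
// above.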
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class 
BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4124<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4124.15790<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4123<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4123.15789<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4122() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4122.15788()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod4121() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod4121.15787()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::Method7.15786<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod2132() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2132.MI.15792()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod2131() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G3_C1365::ClassMethod2131.MI.15791()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr 
"G2_C392::Method6.8470<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method5() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::Method4() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1331<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1330<object>() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1329() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1365`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1365`1<class BaseClass1>::ClassMethod1328() calli default string(class G3_C1365`1<class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G3_C1365`1<class BaseClass1> on type class G3_C1365`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class 
BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr 
"G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr 
"G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass0>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass0>" call 
void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass0,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class 
G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::Method6.MI.4814<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 
castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli 
default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass0,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" 
ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass0,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass0,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr 
"G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass0>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass0>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class 
BaseClass0>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G2_C392`2<class BaseClass1,class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::Method7.4815<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G1_C7`2<class BaseClass1,class 
BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C7`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G1_C7`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G1_C7`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::Method7.MI.4816<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.MI.8468()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class 
IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2132() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod2132.8474()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod2131() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod2131.8473()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method7.8471<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method6<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method6.8470<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method5() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method5.8469()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::Method4() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method4.8467()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1331<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1331.4820<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) 
ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1330<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1330.MI.8476<System.Object>()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1329() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::ClassMethod1329.MI.8475()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C392`2<class BaseClass1,class BaseClass1> ldloc.0 ldvirtftn instance string class G2_C392`2<class BaseClass1,class BaseClass1>::ClassMethod1328() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G1_C7::ClassMethod1328.4817()" ldstr "class G2_C392`2<class BaseClass1,class BaseClass1> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G2_C392`2<class BaseClass1,class BaseClass1>) ldstr "G2_C392::Method7.MI.8472<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C392`2<class BaseClass1,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated893::MethodCallingTest() call void Generated893::ConstrainedCallsTest() call void Generated893::StructConstrainedInterfaceCallsTest() call void Generated893::CalliTest() ldc.i4 100 ret } }
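The IL above repeatedly fetches a virtual slot with `ldvirtftn` and invokes it through `calli`, then asserts which override ran. C# has no direct `ldvirtftn`, but a rough analogue of the dispatch pattern (hypothetical types standing in for the generated hierarchy, not the test's actual classes) uses `Delegate.CreateDelegate`, which likewise resolves a virtual method against the concrete instance:

```C#
using System;

class Base { public virtual string Method4() => "Base::Method4"; }
class Derived : Base { public override string Method4() => "Derived::Method4"; }

static class VirtualDispatchSketch
{
    static void Main()
    {
        Base obj = new Derived();

        // Binding a closed delegate over a virtual MethodInfo resolves the
        // most derived override, mirroring what ldvirtftn does at IL level.
        var invoke = (Func<string>)Delegate.CreateDelegate(
            typeof(Func<string>), obj, typeof(Base).GetMethod(nameof(Base.Method4))!);

        string result = invoke();
        if (result != "Derived::Method4")
            throw new Exception($"Unexpected override: {result}");
    }
}
```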
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
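Every row in this dump describes the same PR, which adds a timestamp-based elapsed-time helper to `Stopwatch`. A minimal usage sketch, assuming the single-`long`-argument overload implied by the PR title (`DoWork` is a placeholder):

```C#
using System;
using System.Diagnostics;

class ElapsedTimeSketch
{
    static void Main()
    {
        // Capture a raw timestamp instead of allocating a Stopwatch instance.
        long start = Stopwatch.GetTimestamp();

        DoWork();

        // GetElapsedTime converts the timestamp delta into a TimeSpan,
        // replacing manual arithmetic against Stopwatch.Frequency.
        TimeSpan elapsed = Stopwatch.GetElapsedTime(start);
        Console.WriteLine($"DoWork took {elapsed.TotalMilliseconds:F2} ms");
    }

    static void DoWork() => System.Threading.Thread.Sleep(10);
}
```

The appeal of this shape is that timing a region needs only a `long` on the stack, which matters in allocation-sensitive code paths.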
./src/libraries/System.Private.Xml/tests/XmlResolver/System.Xml.XmlResolver.Tests/XmlPreloadedResolverAddRemoveTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml.Resolvers; using System.Text; using System.IO; using Xunit; namespace System.Xml.XmlResolver.Tests { public class XmlPreloadedResolverAddRemoveTests { [Fact] public void XmlResolverAddWithInvalidData() { var xmlResolver = new XmlPreloadedResolver(); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, new byte[22])); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null as byte[])); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, null, 0, 0)); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null, 0, 0)); Assert.Throws<ArgumentOutOfRangeException>(() => xmlResolver.Add(new Uri("https://html"), new byte[22], -1, 0)); Assert.Throws<ArgumentOutOfRangeException>(() => xmlResolver.Add(new Uri("https://html"), new byte[22], 0, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => xmlResolver.Add(new Uri("https://html"), new byte[11], 5, 20)); //No Exception should be thrown for the below cases as these are border cases xmlResolver.Add(new Uri("https://html"), new byte[0], 0, 0); xmlResolver.Add(new Uri("https://html"), new byte[5], 0, 5); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, new MemoryStream())); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null as MemoryStream)); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, string.Empty)); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null as string)); } [Fact] public void XmlResolverAddWithValidData() { var xmlResolver = new XmlPreloadedResolver(XmlKnownDtds.Xhtml10); byte[] data = Encoding.ASCII.GetBytes("hello world"); MemoryStream stream = new MemoryStream(data); xmlResolver.Add(new Uri("-//Sample//URI//For Testing", UriKind.RelativeOrAbsolute), stream); Stream result = xmlResolver.GetEntity(new Uri("-//Sample//URI//For Testing", UriKind.RelativeOrAbsolute), null, typeof(Stream)) as Stream; Assert.NotNull(result); byte[] output = new byte[data.Length]; result.Read(output, 0, output.Length); Assert.Equal(data, output); DummyStream dummyStream = new DummyStream(data); xmlResolver.Add(new Uri("-//W3C//DTD XHTML 1.0 Strict//EN", UriKind.RelativeOrAbsolute), dummyStream); Stream otherResult = xmlResolver.GetEntity(new Uri("-//W3C//DTD XHTML 1.0 Strict//EN", UriKind.RelativeOrAbsolute), null, typeof(Stream)) as Stream; output = new byte[data.Length]; otherResult.Read(output, 0, output.Length); Assert.Equal(data, output); } [Fact] public void XmlResolverRemoveWithInvalidData() { var xmlResolver = new XmlPreloadedResolver(XmlKnownDtds.Xhtml10); Assert.Throws<ArgumentNullException>(() => xmlResolver.Remove(null)); } [Fact] public void XmlResolverRemoveWithValidData() { var xmlResolver = new XmlPreloadedResolver(XmlKnownDtds.Xhtml10); byte[] data = Encoding.ASCII.GetBytes("hello world"); MemoryStream stream = new MemoryStream(data); xmlResolver.Add(new Uri("-//W3C//DTD XHTML 1.0 Transitional//EN", UriKind.RelativeOrAbsolute), stream); xmlResolver.Remove(new Uri("-//W3C//DTD XHTML 1.0 Transitional//EN", UriKind.RelativeOrAbsolute)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml.Resolvers; using System.Text; using System.IO; using Xunit; namespace System.Xml.XmlResolver.Tests { public class XmlPreloadedResolverAddRemoveTests { [Fact] public void XmlResolverAddWithInvalidData() { var xmlResolver = new XmlPreloadedResolver(); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, new byte[22])); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null as byte[])); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, null, 0, 0)); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null, 0, 0)); Assert.Throws<ArgumentOutOfRangeException>(() => xmlResolver.Add(new Uri("https://html"), new byte[22], -1, 0)); Assert.Throws<ArgumentOutOfRangeException>(() => xmlResolver.Add(new Uri("https://html"), new byte[22], 0, -1)); Assert.Throws<ArgumentOutOfRangeException>(() => xmlResolver.Add(new Uri("https://html"), new byte[11], 5, 20)); //No Exception should be thrown for the below cases as these are border cases xmlResolver.Add(new Uri("https://html"), new byte[0], 0, 0); xmlResolver.Add(new Uri("https://html"), new byte[5], 0, 5); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, new MemoryStream())); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null as MemoryStream)); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(null, string.Empty)); Assert.Throws<ArgumentNullException>(() => xmlResolver.Add(new Uri("https://html"), null as string)); } [Fact] public void XmlResolverAddWithValidData() { var xmlResolver = new XmlPreloadedResolver(XmlKnownDtds.Xhtml10); byte[] data = Encoding.ASCII.GetBytes("hello world"); MemoryStream stream = new MemoryStream(data); xmlResolver.Add(new Uri("-//Sample//URI//For Testing", UriKind.RelativeOrAbsolute), stream); Stream result = xmlResolver.GetEntity(new Uri("-//Sample//URI//For Testing", UriKind.RelativeOrAbsolute), null, typeof(Stream)) as Stream; Assert.NotNull(result); byte[] output = new byte[data.Length]; result.Read(output, 0, output.Length); Assert.Equal(data, output); DummyStream dummyStream = new DummyStream(data); xmlResolver.Add(new Uri("-//W3C//DTD XHTML 1.0 Strict//EN", UriKind.RelativeOrAbsolute), dummyStream); Stream otherResult = xmlResolver.GetEntity(new Uri("-//W3C//DTD XHTML 1.0 Strict//EN", UriKind.RelativeOrAbsolute), null, typeof(Stream)) as Stream; output = new byte[data.Length]; otherResult.Read(output, 0, output.Length); Assert.Equal(data, output); } [Fact] public void XmlResolverRemoveWithInvalidData() { var xmlResolver = new XmlPreloadedResolver(XmlKnownDtds.Xhtml10); Assert.Throws<ArgumentNullException>(() => xmlResolver.Remove(null)); } [Fact] public void XmlResolverRemoveWithValidData() { var xmlResolver = new XmlPreloadedResolver(XmlKnownDtds.Xhtml10); byte[] data = Encoding.ASCII.GetBytes("hello world"); MemoryStream stream = new MemoryStream(data); xmlResolver.Add(new Uri("-//W3C//DTD XHTML 1.0 Transitional//EN", UriKind.RelativeOrAbsolute), stream); xmlResolver.Remove(new Uri("-//W3C//DTD XHTML 1.0 Transitional//EN", UriKind.RelativeOrAbsolute)); } } }
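The tests above pin down `XmlPreloadedResolver`'s add/remove contract; a minimal non-test sketch of the same round trip, with an illustrative URI (any relative URI works the same way):

```C#
using System;
using System.IO;
using System.Text;
using System.Xml.Resolvers;

class PreloadedResolverSketch
{
    static void Main()
    {
        var resolver = new XmlPreloadedResolver();
        var uri = new Uri("-//Example//Sample DTD//EN", UriKind.RelativeOrAbsolute);

        // Preload in-memory bytes under the URI, resolve them back as a
        // Stream, then unregister the mapping.
        resolver.Add(uri, Encoding.ASCII.GetBytes("<!-- preloaded -->"));
        using var stream = (Stream)resolver.GetEntity(uri, null, typeof(Stream));
        Console.WriteLine(new StreamReader(stream).ReadToEnd());
        resolver.Remove(uri);
    }
}
```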
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/TupleElementNamesAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; namespace System.Runtime.CompilerServices { /// <summary> /// Indicates that the use of <see cref="System.ValueTuple"/> on a member is meant to be treated as a tuple with element names. /// </summary> [CLSCompliant(false)] [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Event)] public sealed class TupleElementNamesAttribute : Attribute { private readonly string?[] _transformNames; /// <summary> /// Initializes a new instance of the <see /// cref="TupleElementNamesAttribute"/> class. /// </summary> /// <param name="transformNames"> /// Specifies, in a pre-order depth-first traversal of a type's /// construction, which <see cref="System.ValueTuple"/> occurrences are /// meant to carry element names. /// </param> /// <remarks> /// This constructor is meant to be used on types that contain an /// instantiation of <see cref="System.ValueTuple"/> that contains /// element names. For instance, if <c>C</c> is a generic type with /// two type parameters, then a use of the constructed type <c>C{<see /// cref="System.ValueTuple{T1, T2}"/>, <see /// cref="System.ValueTuple{T1, T2, T3}"/></c> might be intended to /// treat the first type argument as a tuple with element names and the /// second as a tuple without element names, in which case the /// appropriate attribute specification should use a /// <c>transformNames</c> value of <c>{ "name1", "name2", null, null, /// null }</c>. /// </remarks> public TupleElementNamesAttribute(string?[] transformNames!!) { _transformNames = transformNames; } /// <summary> /// Specifies, in a pre-order depth-first traversal of a type's /// construction, which <see cref="System.ValueTuple"/> elements are /// meant to carry element names. /// </summary> public IList<string?> TransformNames => _transformNames; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; namespace System.Runtime.CompilerServices { /// <summary> /// Indicates that the use of <see cref="System.ValueTuple"/> on a member is meant to be treated as a tuple with element names. /// </summary> [CLSCompliant(false)] [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Event)] public sealed class TupleElementNamesAttribute : Attribute { private readonly string?[] _transformNames; /// <summary> /// Initializes a new instance of the <see /// cref="TupleElementNamesAttribute"/> class. /// </summary> /// <param name="transformNames"> /// Specifies, in a pre-order depth-first traversal of a type's /// construction, which <see cref="System.ValueTuple"/> occurrences are /// meant to carry element names. /// </param> /// <remarks> /// This constructor is meant to be used on types that contain an /// instantiation of <see cref="System.ValueTuple"/> that contains /// element names. For instance, if <c>C</c> is a generic type with /// two type parameters, then a use of the constructed type <c>C{<see /// cref="System.ValueTuple{T1, T2}"/>, <see /// cref="System.ValueTuple{T1, T2, T3}"/></c> might be intended to /// treat the first type argument as a tuple with element names and the /// second as a tuple without element names, in which case the /// appropriate attribute specification should use a /// <c>transformNames</c> value of <c>{ "name1", "name2", null, null, /// null }</c>. /// </remarks> public TupleElementNamesAttribute(string?[] transformNames!!) { _transformNames = transformNames; } /// <summary> /// Specifies, in a pre-order depth-first traversal of a type's /// construction, which <see cref="System.ValueTuple"/> elements are /// meant to carry element names. /// </summary> public IList<string?> TransformNames => _transformNames; } }
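The remarks above describe `transformNames` as a pre-order, depth-first flattening of the tuple structure. A sketch of what that means in practice; note the C# compiler applies this attribute itself and rejects explicit uses (error CS8138), so the attribute application is shown only in comments:

```C#
public class Example
{
    // Declaring a member with tuple element names:
    public (int count, string name) Pair { get; set; }

    // In metadata the property type is System.ValueTuple<int, string> and the
    // compiler emits, conceptually:
    //   [TupleElementNames(new[] { "count", "name" })]
    //
    // For the remarks' example C<(T1 name1, T2 name2), (T1, T2, T3)>, the
    // pre-order flattening over all five tuple elements yields
    //   { "name1", "name2", null, null, null }.
}
```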
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.ComponentModel.Composition/src/System/ComponentModel/Composition/CompositionErrorId.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.ComponentModel.Composition { internal enum CompositionErrorId : int { Unknown = 0, InvalidExportMetadata, ImportNotSetOnPart, ImportEngine_ComposeTookTooManyIterations, ImportEngine_ImportCardinalityMismatch, ImportEngine_PartCycle, ImportEngine_PartCannotSetImport, ImportEngine_PartCannotGetExportedValue, ImportEngine_PartCannotActivate, ImportEngine_PreventedByExistingImport, ImportEngine_InvalidStateForRecomposition, ReflectionModel_ImportThrewException, ReflectionModel_ImportNotAssignableFromExport, ReflectionModel_ImportCollectionNull, ReflectionModel_ImportCollectionNotWritable, ReflectionModel_ImportCollectionConstructionThrewException, ReflectionModel_ImportCollectionGetThrewException, ReflectionModel_ImportCollectionIsReadOnlyThrewException, ReflectionModel_ImportCollectionClearThrewException, ReflectionModel_ImportCollectionAddThrewException, ReflectionModel_ImportManyOnParameterCanOnlyBeAssigned, } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.ComponentModel.Composition { internal enum CompositionErrorId : int { Unknown = 0, InvalidExportMetadata, ImportNotSetOnPart, ImportEngine_ComposeTookTooManyIterations, ImportEngine_ImportCardinalityMismatch, ImportEngine_PartCycle, ImportEngine_PartCannotSetImport, ImportEngine_PartCannotGetExportedValue, ImportEngine_PartCannotActivate, ImportEngine_PreventedByExistingImport, ImportEngine_InvalidStateForRecomposition, ReflectionModel_ImportThrewException, ReflectionModel_ImportNotAssignableFromExport, ReflectionModel_ImportCollectionNull, ReflectionModel_ImportCollectionNotWritable, ReflectionModel_ImportCollectionConstructionThrewException, ReflectionModel_ImportCollectionGetThrewException, ReflectionModel_ImportCollectionIsReadOnlyThrewException, ReflectionModel_ImportCollectionClearThrewException, ReflectionModel_ImportCollectionAddThrewException, ReflectionModel_ImportManyOnParameterCanOnlyBeAssigned, } }
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Diagnostics.FileVersionInfo/System.Diagnostics.FileVersionInfo.sln
Microsoft Visual Studio Solution File, Format Version 12.00 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TestUtilities", "..\Common\tests\TestUtilities\TestUtilities.csproj", "{D6C0D80D-9454-4965-A580-A43484972632}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo", "ref\System.Diagnostics.FileVersionInfo.csproj", "{46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo", "src\System.Diagnostics.FileVersionInfo.csproj", "{922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo.TestAssembly", "tests\System.Diagnostics.FileVersionInfo.TestAssembly\System.Diagnostics.FileVersionInfo.TestAssembly.csproj", "{0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo.Tests", "tests\System.Diagnostics.FileVersionInfo.Tests\System.Diagnostics.FileVersionInfo.Tests.csproj", "{2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LibraryImportGenerator", "..\System.Runtime.InteropServices\gen\LibraryImportGenerator\LibraryImportGenerator.csproj", "{B9224F74-9714-4C37-895C-BFAE0FC97B59}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Interop.SourceGeneration", "..\System.Runtime.InteropServices\gen\Microsoft.Interop.SourceGeneration\Microsoft.Interop.SourceGeneration.csproj", "{C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{8042448A-EB54-4EBB-A1E0-BAA0B5A343D8}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "ref", "ref", "{36141B8B-E6F5-4AAF-A053-BDD189531BAF}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{888B9739-0B73-4F21-A3CC-4694AEFE55CA}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "gen", "gen", "{AC3F6844-B70D-4DC3-A930-43073DA6F2D8}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {D6C0D80D-9454-4965-A580-A43484972632}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {D6C0D80D-9454-4965-A580-A43484972632}.Debug|Any CPU.Build.0 = Debug|Any CPU {D6C0D80D-9454-4965-A580-A43484972632}.Release|Any CPU.ActiveCfg = Release|Any CPU {D6C0D80D-9454-4965-A580-A43484972632}.Release|Any CPU.Build.0 = Release|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Debug|Any CPU.Build.0 = Debug|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Release|Any CPU.ActiveCfg = Release|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Release|Any CPU.Build.0 = Release|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Debug|Any CPU.Build.0 = Debug|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Release|Any CPU.ActiveCfg = Release|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Release|Any CPU.Build.0 = Release|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Debug|Any CPU.Build.0 = Debug|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Release|Any CPU.ActiveCfg = Release|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Release|Any 
CPU.Build.0 = Release|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Debug|Any CPU.Build.0 = Debug|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Release|Any CPU.ActiveCfg = Release|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Release|Any CPU.Build.0 = Release|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Debug|Any CPU.Build.0 = Debug|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Release|Any CPU.ActiveCfg = Release|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Release|Any CPU.Build.0 = Release|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Debug|Any CPU.Build.0 = Debug|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Release|Any CPU.ActiveCfg = Release|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution {D6C0D80D-9454-4965-A580-A43484972632} = {8042448A-EB54-4EBB-A1E0-BAA0B5A343D8} {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F} = {8042448A-EB54-4EBB-A1E0-BAA0B5A343D8} {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49} = {8042448A-EB54-4EBB-A1E0-BAA0B5A343D8} {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6} = {36141B8B-E6F5-4AAF-A053-BDD189531BAF} {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2} = {888B9739-0B73-4F21-A3CC-4694AEFE55CA} {B9224F74-9714-4C37-895C-BFAE0FC97B59} = {AC3F6844-B70D-4DC3-A930-43073DA6F2D8} {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05} = {AC3F6844-B70D-4DC3-A930-43073DA6F2D8} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {9FDF2FF3-5DFC-45E8-9959-B59FF906E689} EndGlobalSection EndGlobal
Microsoft Visual Studio Solution File, Format Version 12.00 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TestUtilities", "..\Common\tests\TestUtilities\TestUtilities.csproj", "{D6C0D80D-9454-4965-A580-A43484972632}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo", "ref\System.Diagnostics.FileVersionInfo.csproj", "{46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo", "src\System.Diagnostics.FileVersionInfo.csproj", "{922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo.TestAssembly", "tests\System.Diagnostics.FileVersionInfo.TestAssembly\System.Diagnostics.FileVersionInfo.TestAssembly.csproj", "{0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Diagnostics.FileVersionInfo.Tests", "tests\System.Diagnostics.FileVersionInfo.Tests\System.Diagnostics.FileVersionInfo.Tests.csproj", "{2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LibraryImportGenerator", "..\System.Runtime.InteropServices\gen\LibraryImportGenerator\LibraryImportGenerator.csproj", "{B9224F74-9714-4C37-895C-BFAE0FC97B59}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Interop.SourceGeneration", "..\System.Runtime.InteropServices\gen\Microsoft.Interop.SourceGeneration\Microsoft.Interop.SourceGeneration.csproj", "{C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{8042448A-EB54-4EBB-A1E0-BAA0B5A343D8}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "ref", "ref", "{36141B8B-E6F5-4AAF-A053-BDD189531BAF}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{888B9739-0B73-4F21-A3CC-4694AEFE55CA}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "gen", "gen", "{AC3F6844-B70D-4DC3-A930-43073DA6F2D8}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {D6C0D80D-9454-4965-A580-A43484972632}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {D6C0D80D-9454-4965-A580-A43484972632}.Debug|Any CPU.Build.0 = Debug|Any CPU {D6C0D80D-9454-4965-A580-A43484972632}.Release|Any CPU.ActiveCfg = Release|Any CPU {D6C0D80D-9454-4965-A580-A43484972632}.Release|Any CPU.Build.0 = Release|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Debug|Any CPU.Build.0 = Debug|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Release|Any CPU.ActiveCfg = Release|Any CPU {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6}.Release|Any CPU.Build.0 = Release|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Debug|Any CPU.Build.0 = Debug|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Release|Any CPU.ActiveCfg = Release|Any CPU {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2}.Release|Any CPU.Build.0 = Release|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Debug|Any CPU.Build.0 = Debug|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Release|Any CPU.ActiveCfg = Release|Any CPU {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F}.Release|Any 
CPU.Build.0 = Release|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Debug|Any CPU.Build.0 = Debug|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Release|Any CPU.ActiveCfg = Release|Any CPU {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49}.Release|Any CPU.Build.0 = Release|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Debug|Any CPU.Build.0 = Debug|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Release|Any CPU.ActiveCfg = Release|Any CPU {B9224F74-9714-4C37-895C-BFAE0FC97B59}.Release|Any CPU.Build.0 = Release|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Debug|Any CPU.Build.0 = Debug|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Release|Any CPU.ActiveCfg = Release|Any CPU {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution {D6C0D80D-9454-4965-A580-A43484972632} = {8042448A-EB54-4EBB-A1E0-BAA0B5A343D8} {0B3AFB8E-37F9-4FF7-A5A1-1DABD484231F} = {8042448A-EB54-4EBB-A1E0-BAA0B5A343D8} {2F5C48C2-CE74-49FD-BA41-25FCBCE53A49} = {8042448A-EB54-4EBB-A1E0-BAA0B5A343D8} {46E9AC6C-347E-4F94-80CB-E1857EC0C5C6} = {36141B8B-E6F5-4AAF-A053-BDD189531BAF} {922C96F5-9F92-4F0E-A83D-D1BCDDD187F2} = {888B9739-0B73-4F21-A3CC-4694AEFE55CA} {B9224F74-9714-4C37-895C-BFAE0FC97B59} = {AC3F6844-B70D-4DC3-A930-43073DA6F2D8} {C07D96C2-F7BA-45D5-BAE2-C06B68FF9F05} = {AC3F6844-B70D-4DC3-A930-43073DA6F2D8} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {9FDF2FF3-5DFC-45E8-9959-B59FF906E689} EndGlobalSection EndGlobal
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Security.Permissions/src/System/Configuration/ConfigurationPermission.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Security; using System.Security.Permissions; namespace System.Configuration { #if NETCOREAPP [Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] #endif public sealed class ConfigurationPermission : CodeAccessPermission, IUnrestrictedPermission { public ConfigurationPermission(PermissionState state) { } public bool IsUnrestricted() => false; public override IPermission Copy () { return default(IPermission); } public override IPermission Union(IPermission target) { return default(IPermission); } public override IPermission Intersect(IPermission target) { return default(IPermission); } public override bool IsSubsetOf(IPermission target) => false; public override void FromXml(SecurityElement securityElement) { } public override SecurityElement ToXml() { return default(SecurityElement); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Security; using System.Security.Permissions; namespace System.Configuration { #if NETCOREAPP [Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] #endif public sealed class ConfigurationPermission : CodeAccessPermission, IUnrestrictedPermission { public ConfigurationPermission(PermissionState state) { } public bool IsUnrestricted() => false; public override IPermission Copy () { return default(IPermission); } public override IPermission Union(IPermission target) { return default(IPermission); } public override IPermission Intersect(IPermission target) { return default(IPermission); } public override bool IsSubsetOf(IPermission target) => false; public override void FromXml(SecurityElement securityElement) { } public override SecurityElement ToXml() { return default(SecurityElement); } } }
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./docs/design/libraries/LibraryImportGenerator/Pipeline.md
# P/Invoke Generation Pipeline The P/Invoke source generator is responsible for finding all methods marked with `GeneratedDllImportAttribute` and generating code for their implementations (stubs) and corresponding P/Invokes that will be called by the stubs. For every method, the steps are: 1. [Process the symbols and metadata](#symbols-and-metadata-processing) for the method, its parameters, and its return type. 1. [Determine the marshalling generators](#marshalling-generators) that will be responsible for generating the stub code for each parameter and return 1. [Generate the stub code](#stub-code-generation) 1. [Generate the corresponding P/Invoke](#pinvoke) 1. Add the generated source to the compilation. The pipeline uses the Roslyn [Syntax APIs](https://docs.microsoft.com/dotnet/api/microsoft.codeanalysis.csharp.syntax) to create the generated code. This imposes some structure for the marshalling generators and allows for easier inspection or modification (if desired) of the generated code. ## Symbol and metadata processing The generator processes the method's `GeneratedDllImportAttribute` data, the method's parameter and return types, and the metadata on them (e.g. [`LCIDConversionAttribute`](https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.lcidconversionattribute), [`MarshalAsAttribute`][MarshalAsAttribute], [struct marshalling attributes](StructMarshalling.md)). This information is used to determine the corresponding native type for each managed parameter/return type and how they will be marshalled. A [`TypePositionInfo`][src-TypePositionInfo] is created for each type that needs to be marshalled. For each parameter and return type, this captures the managed type, managed and native positions (return or index in parameter list), and marshalling information. The marshalling information is represented by various subclasses of [`MarshallingInfo`][src-MarshallingAttributeInfo] and represents all user-defined marshalling information for the specific parameter or return type. These classes are intended to simply capture any specified marshalling information, not interpret what that information means in terms of marshalling behaviour; that is handled when determining the [marshalling generator](#marshalling-generators) for each `TypePositionInfo`. The processing step also includes handling any implicit parameter/return types that are required for the P/Invoke, but not part of the managed method signature; for example, a method with [`PreserveSig=false`][PreserveSig] requires an HRESULT return type and potentially an out parameter matching the managed method's return type. ### `PreserveSig=false` The below signature indicates that the native function returns an HRESULT, but has no other return value (out parameter). ```C# [GeneratedDllImport("Lib", PreserveSig = false)] static partial void Method(); ``` Processing the above signature would create a `TypePositionInfo` for the HRESULT return type for native call, with properties indicating that it is in the native return position and has no managed position. The actual P/Invoke would be: ```C# [DllImport("Lib", EntryPoint = "Method")] static partial int Method__PInvoke__(); ``` The below signature indicates that the native function returns an HRESULT and also has an out parameter to be used as the managed return value. 
```C# [GeneratedDllImport("Lib", PreserveSig = false)] [return: MarshalAs(UnmanagedType.U1)] static partial bool MethodWithReturn(); ``` Processing the above signature would create a `TypePositionInfo` for the HRESULT return type for native call, with properties indicating that it is in the native return position and has no managed position. The `TypePositionInfo` representing the `bool` return on the managed method would have properties indicating it is the last parameter for the native call and is in the managed return position. The actual P/Invoke would be: ```C# [DllImport("Lib", EntryPoint = "MethodWithReturn")] static partial int MethodWithReturn__PInvoke__(byte* retVal); ``` ## Marshalling generators Each parameter and return for the method is handled by an [`IMarshallingGenerator`][src-MarshallingGenerator] instance. The processed information for each parameter and return type is used to determine the appropriate marshalling generator for handling that type. Support for different types can be added in the form of new implementations of `IMarshallingGenerator`. The marshalling generators are responsible for generating the code for each [stage](#stages) of the stub. They are intended to be stateless, such that they are given all the data ([`TypePositionInfo`][src-TypePositionInfo]) for which they need to generate marshalling code and the context ([`StubCodeContext`][src-StubCodeContext]) under which that code should be generated. ## Stub code generation Generation of the stub code happens in stages. The marshalling generator for each parameter and return is called to generate code for each stage of the stub. The statements and syntax provided by each marshalling generator for each stage combine to form the full stub implementation. The stub code generator itself will handle some initial setup and variable declarations: - Assign `out` parameters to `default` - Declare variable for managed representation of return value - Declare variables for native representation of parameters and return value (if necessary) ### Stages 1. `Setup`: initialization that happens before marshalling any data - If the method has a non-void return, call `Generate` on the marshalling generator for the return - Call `Generate` on the marshalling generator for every parameter 1. `Marshal`: conversion of managed to native data - Call `Generate` on the marshalling generator for every parameter 1. `Pin`: data pinning in preparation for calling the generated P/Invoke - Call `Generate` on the marshalling generator for every parameter - Ignore any statements that are not `fixed` statements 1. `Invoke`: call to the generated P/Invoke - Call `AsArgument` on the marshalling generator for every parameter - Create invocation statement that calls the generated P/Invoke 1. `KeepAlive`: keep alive any objects whose native representation won't keep them alive across the call. - Call `Generate` on the marshalling generator for every parameter. 1. `Unmarshal`: conversion of native to managed data - If the method has a non-void return, call `Generate` on the marshalling generator for the return - Call `Generate` on the marshalling generator for every parameter 1. `GuaranteedUnmarshal`: conversion of native to managed data even when an exception is thrown - Call `Generate` on the marshalling generator for every parameter. 1. 
`Cleanup`: free any allocated resources - Call `Generate` on the marshalling generator for every parameter Generated P/Invoke structure (if no code is generated for `GuaranteedUnmarshal` and `Cleanup`, the `try-finally` is omitted): ```C# << Variable Declarations >> << Setup >> try { << Marshal >> << Pin >> (fixed) { << Invoke >> } << Keep Alive >> << Unmarshal >> } finally { << GuaranteedUnmarshal >> << Cleanup >> } ``` ### Stub conditional features Some marshalling optimizations are only available in specific scenarios. Generally, there are 4 basic marshalling contexts: - P/Invoke - Reverse P/Invoke - User-defined structure marshalling - Non-blittable array marshalling This experiment currently focuses on two of these contexts: P/Invoke and non-blittable array marshalling (in the context of a P/Invoke). There are three specialized marshalling features that may only be available in some contexts: - Pinning to marshal data without copying (the `fixed` statement) - Stack allocation across the native context (using the `stackalloc` keyword or https://github.com/dotnet/runtime/issues/25423) - Storing additional temporary state in extra local variables Support for these features is indicated in code by the `abstract` `SingleFrameSpansNativeContext` and `AdditionalTemporaryStateLivesAcrossStages` properties on the `StubCodeContext` type. The `SingleFrameSpansNativeContext` property represents whether or not both pinning and stack-allocation are supported. These concepts are combined because we cannot safely support a conditional-stackalloc style API (such as https://github.com/dotnet/runtime/issues/52065) and safely get a pointer to data without also being able to pin. The various scenarios mentioned above have different levels of support for these specialized features: | Scenarios | Pinning and Stack allocation across the native context | Storing additional temporary state in locals | |------|-----|-----| | P/Invoke | supported | supported | | Reverse P/Invoke | unsupported | supported | | User-defined structure content marshalling | unsupported | unsupported | | non-blittable array marshalling | unsupported | unsupported | To help enable developers to use the full model described in the [Struct Marshalling design](./StructMarshalling.md), we declare that in contexts where `AdditionalTemporaryStateLivesAcrossStages` is false, developers can still assume that state declared in the `Setup` phase is valid in any phase, but any side effects in code emitted in a phase other than `Setup` will not be guaranteed to be visible in other phases. This enables developers to still use the identifiers declared in the `Setup` phase in their other phases, but they'll need to take care to design their generators to handle these rules. ### `SetLastError=true` The stub code generation also handles [`SetLastError=true`][SetLastError] behaviour. This configuration indicates that the system error code ([`errno`](https://en.wikipedia.org/wiki/Errno.h) on Unix, [`GetLastError`](https://docs.microsoft.com/windows/win32/api/errhandlingapi/nf-errhandlingapi-getlasterror) on Windows) should be stored after the native invocation, such that it can be retrieved using [`Marshal.GetLastWin32Error`](https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.marshal.getlastwin32error). This means that, rather than simply invoke the native method, the generated stub will: 1. Clear the system error by setting it to 0 2. Invoke the native method 3. Get the system error 4. 
Set the stored error for the P/Invoke (accessible via `Marshal.GetLastWin32Error`) A core requirement of this functionality is that the P/Invoke called in (2) is blittable (the purpose of the P/Invoke source generator), such that there will be no additional operations (e.g. unmarshalling) after the invocation that could change the system error that is retrieved in (3). Similarly, (3) must not involve any operations before getting the system error that could change the system error. This also relies on the runtime itself preserving the last error (see `BEGIN/END_PRESERVE_LAST_ERROR` macros) during JIT and P/Invoke resolution. Clearing the system error (1) is necessary because the native method may not set the error at all on success and the system error would retain its value from a previous operation. The developer should be able to check `Marshal.GetLastWin32Error` after a P/Invoke to determine success or failure, so the stub explicitly clears the error before the native invocation, such that the last error will indicate success if the native call does not change it. ## P/Invoke The P/Invoke called by the stub is created based on the user's original declaration of the stub. The signature is generated using the syntax returned by `AsNativeType` and `AsParameter` of the marshalling generators for the return and parameters. Any marshalling attributes on the return and parameters of the managed method - [`MarshalAsAttribute`][MarshalAsAttribute], [`InAttribute`][InAttribute], [`OutAttribute`][OutAttribute] - are dropped. The fields of the [`DllImportAttribute`][DllImportAttribute] are set based on the fields of `GeneratedDllImportAttribute` as follows: | Field | Behaviour | | ------------------------------------------------- | --------- | | [`BestFitMapping`][BestFitMapping] | Not supported. See [Compatibility](Compatibility.md). | [`CallingConvention`][CallingConvention] | Passed through to `DllImport`. | [`CharSet`][CharSet] | Passed through to `DllImport`. | [`EntryPoint`][EntryPoint] | If set, passed through to `DllImport`. If not set, explicitly set to method name. | [`ExactSpelling`][ExactSpelling] | Passed through to `DllImport`. | [`PreserveSig`][PreserveSig] | Handled by generated source. Not on generated `DllImport`. | [`SetLastError`][SetLastError] | Handled by generated source. Not on generated `DllImport`. | [`ThrowOnUnmappableChar`][ThrowOnUnmappableChar] | Not supported. See [Compatibility](Compatibility.md). 
### Examples Explicit `EntryPoint`: ```C# // Original declaration [GeneratedDllImport("Lib")] static partial void Method(out int i); // Generated P/Invoke [DllImport("Lib", EntryPoint = "Method")] static partial void Method__PInvoke__(int* i); ``` Passed through: ```C# // Original declaration [GeneratedDllImport("Lib", EntryPoint = "EntryPoint", CharSet = CharSet.Unicode)] static partial int Method(string s); // Generated P/Invoke [DllImport("Lib", EntryPoint = "EntryPoint", CharSet = CharSet.Unicode)] static partial int Method__PInvoke__(ushort* s); ``` Handled by generated source (dropped from `DllImport`): ```C# // Original declaration [GeneratedDllImport("Lib", SetLastError = true)] [return: MarshalAs(UnmanagedType.U1)] static partial bool Method([In][MarshalAs(UnmanagedType.LPWStr)] string s); // Generated P/Invoke [DllImport("Lib", EntryPoint = "Method")] static partial byte Method__PInvoke__(ushort* s); ``` <!-- Links --> [src-MarshallingAttributeInfo]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/MarshallingAttributeInfo.cs [src-MarshallingGenerator]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/DllImportGenerator/Marshalling/MarshallingGenerator.cs [src-StubCodeContext]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/DllImportGenerator/StubCodeContext.cs [src-TypePositionInfo]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/DllImportGenerator/TypePositionInfo.cs [DllImportAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute [MarshalAsAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.marshalasattribute [InAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.inattribute [OutAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.outattribute [BestFitMapping]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.bestfitmapping [CallingConvention]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.callingconvention [CharSet]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.charset [EntryPoint]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.entrypoint [ExactSpelling]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.exactspelling [PreserveSig]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.preservesig [SetLastError]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.setlasterror [ThrowOnUnmappableChar]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.throwonunmappablechar
# P/Invoke Generation Pipeline The P/Invoke source generator is responsible for finding all methods marked with `GeneratedDllImportAttribute` and generating code for their implementations (stubs) and corresponding P/Invokes that will be called by the stubs. For every method, the steps are: 1. [Process the symbols and metadata](#symbols-and-metadata-processing) for the method, its parameters, and its return type. 1. [Determine the marshalling generators](#marshalling-generators) that will be responsible for generating the stub code for each parameter and return 1. [Generate the stub code](#stub-code-generation) 1. [Generate the corresponding P/Invoke](#pinvoke) 1. Add the generated source to the compilation. The pipeline uses the Roslyn [Syntax APIs](https://docs.microsoft.com/dotnet/api/microsoft.codeanalysis.csharp.syntax) to create the generated code. This imposes some structure for the marshalling generators and allows for easier inspection or modification (if desired) of the generated code. ## Symbol and metadata processing The generator processes the method's `GeneratedDllImportAttribute` data, the method's parameter and return types, and the metadata on them (e.g. [`LCIDConversionAttribute`](https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.lcidconversionattribute), [`MarshalAsAttribute`][MarshalAsAttribute], [struct marshalling attributes](StructMarshalling.md)). This information is used to determine the corresponding native type for each managed parameter/return type and how they will be marshalled. A [`TypePositionInfo`][src-TypePositionInfo] is created for each type that needs to be marshalled. For each parameter and return type, this captures the managed type, managed and native positions (return or index in parameter list), and marshalling information. The marshalling information is represented by various subclasses of [`MarshallingInfo`][src-MarshallingAttributeInfo] and represents all user-defined marshalling information for the specific parameter or return type. These classes are intended to simply capture any specified marshalling information, not interpret what that information means in terms of marshalling behaviour; that is handled when determining the [marshalling generator](#marshalling-generators) for each `TypePositionInfo`. The processing step also includes handling any implicit parameter/return types that are required for the P/Invoke, but not part of the managed method signature; for example, a method with [`PreserveSig=false`][PreserveSig] requires an HRESULT return type and potentially an out parameter matching the managed method's return type. ### `PreserveSig=false` The below signature indicates that the native function returns an HRESULT, but has no other return value (out parameter). ```C# [GeneratedDllImport("Lib", PreserveSig = false)] static partial void Method(); ``` Processing the above signature would create a `TypePositionInfo` for the HRESULT return type for native call, with properties indicating that it is in the native return position and has no managed position. The actual P/Invoke would be: ```C# [DllImport("Lib", EntryPoint = "Method")] static partial int Method__PInvoke__(); ``` The below signature indicates that the native function returns an HRESULT and also has an out parameter to be used as the managed return value. 
```C# [GeneratedDllImport("Lib", PreserveSig = false)] [return: MarshalAs(UnmanagedType.U1)] static partial bool MethodWithReturn(); ``` Processing the above signature would create a `TypePositionInfo` for the HRESULT return type for native call, with properties indicating that it is in the native return position and has no managed position. The `TypePositionInfo` representing the `bool` return on the managed method would have properties indicating it is the last parameter for the native call and is in the managed return position. The actual P/Invoke would be: ```C# [DllImport("Lib", EntryPoint = "MethodWithReturn")] static partial int MethodWithReturn__PInvoke__(byte* retVal); ``` ## Marshalling generators Each parameter and return for the method is handled by an [`IMarshallingGenerator`][src-MarshallingGenerator] instance. The processed information for each parameter and return type is used to determine the appropriate marshalling generator for handling that type. Support for different types can be added in the form of new implementations of `IMarshallingGenerator`. The marshalling generators are responsible for generating the code for each [stage](#stages) of the stub. They are intended to be stateless, such that they are given all the data ([`TypePositionInfo`][src_TypePositionInfo]) for which they need to generate marshalling code and the context ([`StubCodeContext`][src-StubCodeContext]) under which that code should be generated. ## Stub code generation Generation of the stub code happens in stages. The marshalling generator for each parameter and return is called to generate code for each stage of the stub. The statements and syntax provided by each marshalling generator for each stage combine to form the full stub implementation. The stub code generator itself will handle some initial setup and variable declarations: - Assign `out` parameters to `default` - Declare variable for managed representation of return value - Declare variables for native representation of parameters and return value (if necessary) ### Stages 1. `Setup`: initialization that happens before marshalling any data - If the method has a non-void return, call `Generate` on the marshalling generator for the return - Call `Generate` on the marshalling generator for every parameter 1. `Marshal`: conversion of managed to native data - Call `Generate` on the marshalling generator for every parameter 1. `Pin`: data pinning in preparation for calling the generated P/Invoke - Call `Generate` on the marshalling generator for every parameter - Ignore any statements that are not `fixed` statements 1. `Invoke`: call to the generated P/Invoke - Call `AsArgument` on the marshalling generator for every parameter - Create invocation statement that calls the generated P/Invoke 1. `KeepAlive`: keep alive any objects who's native representation won't keep them alive across the call. - Call `Generate` on the marshalling generator for every parameter. 1. `Unmarshal`: conversion of native to managed data - If the method has a non-void return, call `Generate` on the marshalling generator for the return - Call `Generate` on the marshalling generator for every parameter 1. `GuaranteedUnmarshal`: conversion of native to managed data even when an exception is thrown - Call `Generate` on the marshalling generator for every parameter. 1. 
`Cleanup`: free any allocated resources - Call `Generate` on the marshalling generator for every parameter Generated P/Invoke structure (if no code is generated for `GuaranteedUnmarshal` and `Cleanup`, the `try-finally` is omitted): ```C# << Variable Declarations >> << Setup >> try { << Marshal >> << Pin >> (fixed) { << Invoke >> } << Keep Alive >> << Unmarshal >> } finally { << GuaranteedUnmarshal >> << Cleanup >> } ``` ### Stub conditional features Some marshalling optimizations are only available in specific scenarios. Generally, there are 4 basic marshalling contexts: - P/Invoke - Reverse P/Invoke - User-defined structure marshalling - Non-blittable array marshalling This experiment generally is currently only focusing on two of the concepts: P/Invoke and non-blittable array marshalling (in the context of a P/Invoke). There are three features for specialized marshalling features that may only be available in some contexts: - Pinning to marshal data without copying (the `fixed` statement) - Stack allocation across the native context (using the `stackalloc` keyword or https://github.com/dotnet/runtime/issues/25423) - Storing additional temporary state in extra local variables Support for these features is indicated in code by the `abstract` `SingleFrameSpansNativeContext` and `AdditionalTemporaryStateLivesAcrossStages` properties on the `StubCodeContext` type. The `SingleFrameSpansNativeContext` property represents whether or not both pinning and stack-allocation are supported. These concepts are combined because we cannot safely support a conditional-stackalloc style API (such as https://github.com/dotnet/runtime/issues/52065) and safely get a pointer to data without also being able to pin. The various scenarios mentioned above have different levels of support for these specialized features: | Scenarios | Pinning and Stack allocation across the native context | Storing additional temporary state in locals | |------|-----|-----| | P/Invoke | supported | supported | | Reverse P/Invoke | unsupported | supported | | User-defined structure content marshalling | unsupported | unsupported | | non-blittable array marshalling | unsupported | unuspported | To help enable developers to use the full model described in the [Struct Marshalling design](./StructMarshalling.md), we declare that in contexts where `AdditionalTemporaryStateLivesAcrossStages` is false, developers can still assume that state declared in the `Setup` phase is valid in any phase, but any side effects in code emitted in a phase other than `Setup` will not be guaranteed to be visible in other phases. This enables developers to still use the identifiers declared in the `Setup` phase in their other phases, but they'll need to take care to design their generators to handle these rules. ### `SetLastError=true` The stub code generation also handles [`SetLastError=true`][SetLastError] behaviour. This configuration indicates that system error code ([`errno`](https://en.wikipedia.org/wiki/Errno.h) on Unix, [`GetLastError`](https://docs.microsoft.com/windows/win32/api/errhandlingapi/nf-errhandlingapi-getlasterror) on Windows) should be stored after the native invocation, such that it can be retrieved using [`Marshal.GetLastWin32Error`](https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.marshal.getlastwin32error). This means that, rather than simply invoke the native method, the generated stub will: 1. Clear the system error by setting it to 0 2. Invoke the native method 3. Get the system error 4. 
Set the stored error for the P/Invoke (accessible via `Marshal.GetLastWin32Error`) A core requirement of this functionality is that the P/Invoke called in (2) is blittable (the purpose of the P/Invoke source generator), such that there will be no additional operations (e.g unmarshalling) after the invocation that could change the system error that is retrieved in (3). Similarly, (3) must not involve any operations before getting the system error that could change the system error. This also relies on the runtime itself handling preserving the last error (see `BEGIN/END_PRESERVE_LAST_ERROR` macros) during JIT and P/Invoke resolution. Clearing the system error (1) is necessary because the native method may not set the error at all on success and the system error would retain its value from a previous operation. The developer should be able to check `Marshal.GetLastWin32Error` after a P/Inovke to determine success or failure, so the stub explicitly clears the error before the native invocation, such that the last error will indicate success if the native call does not change it. ## P/Invoke The P/Invoke called by the stub is created based on the user's original declaration of the stub. The signature is generated using the syntax returned by `AsNativeType` and `AsParameter` of the marshalling generators for the return and parameters. Any marshalling attributes on the return and parameters of the managed method - [`MarshalAsAttribute`][MarshalAsAttribute], [`InAttribute`][InAttribute], [`OutAttribute`][OutAttribute] - are dropped. The fields of the [`DllImportAttribute`][DllImportAttribute] are set based on the fields of `GeneratedDllImportAttribute` as follows: | Field | Behaviour | | ------------------------------------------------- | --------- | | [`BestFitMapping`][BestFitMapping] | Not supported. See [Compatibility](Compatibility.md). | [`CallingConvention`][CallingConvention] | Passed through to `DllImport`. | [`CharSet`][CharSet] | Passed through to `DllImport`. | [`EntryPoint`][EntryPoint] | If set, passed through to `DllImport`. If not set, explicitly set to method name. | [`ExactSpelling`][ExactSpelling] | Passed through to `DllImport`. | [`PreserveSig`][PreserveSig] | Handled by generated source. Not on generated `DllImport`. | [`SetLastError`][SetLastError] | Handled by generated source. Not on generated `DllImport`. | [`ThrowOnUnmappableChar`][ThrowOnUnmappableChar] | Not supported. See [Compatibility](Compatibility.md). 
## P/Invoke

The P/Invoke called by the stub is created based on the user's original declaration of the stub. The signature is generated using the syntax returned by `AsNativeType` and `AsParameter` of the marshalling generators for the return and parameters. Any marshalling attributes on the return and parameters of the managed method - [`MarshalAsAttribute`][MarshalAsAttribute], [`InAttribute`][InAttribute], [`OutAttribute`][OutAttribute] - are dropped.

The fields of the [`DllImportAttribute`][DllImportAttribute] are set based on the fields of `GeneratedDllImportAttribute` as follows:

| Field                                            | Behaviour |
| ------------------------------------------------ | --------- |
| [`BestFitMapping`][BestFitMapping]               | Not supported. See [Compatibility](Compatibility.md). |
| [`CallingConvention`][CallingConvention]         | Passed through to `DllImport`. |
| [`CharSet`][CharSet]                             | Passed through to `DllImport`. |
| [`EntryPoint`][EntryPoint]                       | If set, passed through to `DllImport`. If not set, explicitly set to the method name. |
| [`ExactSpelling`][ExactSpelling]                 | Passed through to `DllImport`. |
| [`PreserveSig`][PreserveSig]                     | Handled by generated source. Not on generated `DllImport`. |
| [`SetLastError`][SetLastError]                   | Handled by generated source. Not on generated `DllImport`. |
| [`ThrowOnUnmappableChar`][ThrowOnUnmappableChar] | Not supported. See [Compatibility](Compatibility.md). |

### Examples

Explicit `EntryPoint`:

```C#
// Original declaration
[GeneratedDllImport("Lib")]
static partial void Method(out int i);

// Generated P/Invoke
[DllImport("Lib", EntryPoint = "Method")]
static partial void Method__PInvoke__(int* i);
```

Passed through:

```C#
// Original declaration
[GeneratedDllImport("Lib", EntryPoint = "EntryPoint", CharSet = CharSet.Unicode)]
static partial int Method(string s);

// Generated P/Invoke
[DllImport("Lib", EntryPoint = "EntryPoint", CharSet = CharSet.Unicode)]
static partial int Method__PInvoke__(ushort* s);
```

Handled by generated source (dropped from `DllImport`):

```C#
// Original declaration
[GeneratedDllImport("Lib", SetLastError = true)]
[return: MarshalAs(UnmanagedType.U1)]
static partial bool Method([In][MarshalAs(UnmanagedType.LPWStr)] string s);

// Generated P/Invoke
[DllImport("Lib", EntryPoint = "Method")]
static partial byte Method__PInvoke__(ushort* s);
```

<!-- Links -->
[src-MarshallingAttributeInfo]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/MarshallingAttributeInfo.cs
[src-MarshallingGenerator]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/DllImportGenerator/Marshalling/MarshallingGenerator.cs
[src-StubCodeContext]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/DllImportGenerator/StubCodeContext.cs
[src-TypePositionInfo]: /src/libraries/System.Runtime.InteropServices/gen/Microsoft.Interop.SourceGeneration/DllImportGenerator/TypePositionInfo.cs
[DllImportAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute
[MarshalAsAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.marshalasattribute
[InAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.inattribute
[OutAttribute]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.outattribute
[BestFitMapping]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.bestfitmapping
[CallingConvention]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.callingconvention
[CharSet]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.charset
[EntryPoint]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.entrypoint
[ExactSpelling]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.exactspelling
[PreserveSig]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.preservesig
[SetLastError]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.setlasterror
[ThrowOnUnmappableChar]: https://docs.microsoft.com/dotnet/api/system.runtime.interopservices.dllimportattribute.throwonunmappablechar
"========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1139`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.B<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.B<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void 
Generated667::M.IBase2.B.T<class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.B.B<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.A<class G3_C1139`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1139`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.B.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.B.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1139`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" 
ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method5.4873()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method4.4872()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method4.4872()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method5.MI.4874()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class 
BaseClass0,class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method4.MI.14541()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method5.14542()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.MI.14544<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method5.14542()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method4.6552()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod3676() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod3676.14546()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod3675() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod3675.14545()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method5.14542()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method4.14540()" ldstr "class G3_C1139`1<class 
BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1139`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method5.4873()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method4.4872()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method4.4872()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method5.MI.4874()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on 
type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method4.MI.14541()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method5.14542()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.MI.14544<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method5.14542()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method4.6552()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod3676() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod3676.14546()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod3675() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod3675.14545()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method5.14542()" ldstr "class G3_C1139`1<class 
BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method4.14540()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated667::MethodCallingTest() call void Generated667::ConstrainedCallsTest() call void Generated667::StructConstrainedInterfaceCallsTest() call void Generated667::CalliTest() ldc.i4 100 ret } }
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

.assembly extern mscorlib
{
  .publickeytoken = (B7 7A 5C 56 19 34 E0 89 )
  .ver 4:0:0:0
}
.assembly extern TestFramework
{
  .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A )
}

//TYPES IN FORWARDER ASSEMBLIES:

//TEST ASSEMBLY:
.assembly Generated667 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}

.class public BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void [mscorlib]System.Object::.ctor()
    ret
  }
}

.class public BaseClass1 extends BaseClass0
{
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void BaseClass0::.ctor()
    ret
  }
}

// Most-derived test type: re-implements IBase1`1<class BaseClass0> and overrides
// G2_C191's ClassMethod1709/ClassMethod1710 through explicit '.override' MethodImpls.
.class public G3_C1139`1<T0> extends class G2_C191`1<class BaseClass0> implements class IBase1`1<class BaseClass0>
{
  .method public hidebysig newslot virtual instance string Method4() cil managed noinlining
  {
    ldstr "G3_C1139::Method4.14540()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining
  {
    .override method instance string class IBase1`1<class BaseClass0>::Method4()
    ldstr "G3_C1139::Method4.MI.14541()"
    ret
  }
  .method public hidebysig virtual instance string Method5() cil managed noinlining
  {
    ldstr "G3_C1139::Method5.14542()"
    ret
  }
  .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining
  {
    ldstr "G3_C1139::Method6.14543<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining
  {
    .override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>()
    ldstr "G3_C1139::Method6.MI.14544<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod3675() cil managed noinlining
  {
    ldstr "G3_C1139::ClassMethod3675.14545()"
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod3676() cil managed noinlining
  {
    ldstr "G3_C1139::ClassMethod3676.14546()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'G2_C191<class BaseClass0>.ClassMethod1709'() cil managed noinlining
  {
    .override method instance string class G2_C191`1<class BaseClass0>::ClassMethod1709()
    ldstr "G3_C1139::ClassMethod1709.MI.14547()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'G2_C191<class BaseClass0>.ClassMethod1710'() cil managed noinlining
  {
    .override method instance string class G2_C191`1<class BaseClass0>::ClassMethod1710()
    ldstr "G3_C1139::ClassMethod1710.MI.14548()"
    ret
  }
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void class G2_C191`1<class BaseClass0>::.ctor()
    ret
  }
}

.class public abstract G2_C191`1<T0> extends class G1_C13`2<class BaseClass1,!T0> implements class IBase1`1<!T0>, class IBase2`2<class BaseClass1,class BaseClass1>
{
  .method public hidebysig newslot virtual instance string Method4() cil managed noinlining
  {
    ldstr "G2_C191::Method4.6552()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining
  {
    .override method instance string class IBase1`1<!T0>::Method4()
    ldstr "G2_C191::Method4.MI.6553()"
    ret
  }
  .method public hidebysig newslot virtual instance string Method5() cil managed noinlining
  {
    ldstr "G2_C191::Method5.6554()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method5'() cil managed noinlining
  {
    .override method instance string class IBase1`1<!T0>::Method5()
    ldstr "G2_C191::Method5.MI.6555()"
    ret
  }
  .method public hidebysig virtual instance string Method6<M0>() cil managed noinlining
  {
    ldstr "G2_C191::Method6.6556<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining
  {
    ldstr "G2_C191::Method7.6557<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase2<class BaseClass1,class BaseClass1>.Method7'<M0>() cil managed noinlining
  {
    .override method instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<[1]>()
    ldstr "G2_C191::Method7.MI.6558<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod1709() cil managed noinlining
  {
    ldstr "G2_C191::ClassMethod1709.6559()"
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod1710() cil managed noinlining
  {
    ldstr "G2_C191::ClassMethod1710.6560()"
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod1711<M0>() cil managed noinlining
  {
    ldstr "G2_C191::ClassMethod1711.6561<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string 'G1_C13<class BaseClass1,T0>.ClassMethod1348'<M0>() cil managed noinlining
  {
    .override method instance string class G1_C13`2<class BaseClass1,!T0>::ClassMethod1348<[1]>()
    ldstr "G2_C191::ClassMethod1348.MI.6562<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void class G1_C13`2<class BaseClass1,!T0>::.ctor()
    ret
  }
}

.class interface public abstract IBase1`1<+T0>
{
  .method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
  .method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
  .method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}

.class public abstract G1_C13`2<T0, T1> implements class IBase2`2<!T0,!T1>, class IBase1`1<!T0>
{
  .method public hidebysig virtual instance string Method7<M0>() cil managed noinlining
  {
    ldstr "G1_C13::Method7.4871<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string Method4() cil managed noinlining
  {
    ldstr "G1_C13::Method4.4872()"
    ret
  }
  .method public hidebysig newslot virtual instance string Method5() cil managed noinlining
  {
    ldstr "G1_C13::Method5.4873()"
    ret
  }
  .method public hidebysig newslot virtual instance string 'IBase1<T0>.Method5'() cil managed noinlining
  {
    .override method instance string class IBase1`1<!T0>::Method5()
    ldstr "G1_C13::Method5.MI.4874()"
    ret
  }
  .method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining
  {
    ldstr "G1_C13::Method6.4875<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod1348<M0>() cil managed noinlining
  {
    ldstr "G1_C13::ClassMethod1348.4876<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig newslot virtual instance string ClassMethod1349<M0>() cil managed noinlining
  {
    ldstr "G1_C13::ClassMethod1349.4877<"
    ldtoken !!M0
    call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
    call string [mscorlib]System.String::Concat(object,object)
    ldstr ">()"
    call string [mscorlib]System.String::Concat(object,object)
    ret
  }
  .method public hidebysig specialname rtspecialname instance void .ctor() cil managed
  {
    ldarg.0
    call instance void [mscorlib]System.Object::.ctor()
    ret
  }
}

.class interface public abstract IBase2`2<+T0, -T1>
{
  .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}

// Test driver type: the M.* helpers below issue 'constrained.' callvirt calls through
// the generic parameter W, collect the returned identifier strings into an array, and
// hand them to TestFramework::MethodCallTest for comparison against the expected list.
.class public auto ansi beforefieldinit Generated667
{
  .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed
  {
    .maxstack 5
    .locals init (string[] actualResults)
    ldc.i4.s 0
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
    ldc.i4.s 0
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed
  {
    .maxstack 5
    .locals init (string[] actualResults)
    ldc.i4.s 0
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
    ldc.i4.s 0
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.G3_C1139.T<T0,(class G3_C1139`1<!!T0>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 16
    .locals init (string[] actualResults)
    ldc.i4.s 11
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G3_C1139.T<T0,(class G3_C1139`1<!!T0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 11
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod1348<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod1349<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod1709() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod1710() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod1711<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod3675() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::ClassMethod3676() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<!!T0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.G3_C1139.A<(class G3_C1139`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 16
    .locals init (string[] actualResults)
    ldc.i4.s 11
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G3_C1139.A<(class G3_C1139`1<class BaseClass0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 11
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1348<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1349<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1709() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1710() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1711<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod3675() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod3676() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass0>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.G3_C1139.B<(class G3_C1139`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 16
    .locals init (string[] actualResults)
    ldc.i4.s 11
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G3_C1139.B<(class G3_C1139`1<class BaseClass1>)W>(!!W 'inst', string exp)"
    ldc.i4.s 11
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1348<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1349<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1709() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1710() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1711<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod3675() stelem.ref
    ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod3676() stelem.ref
    ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::Method4() stelem.ref
    ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::Method5() stelem.ref
    ldloc.s actualResults ldc.i4.s 9 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::Method6<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 10 ldarga.s 0 constrained. !!W callvirt instance string class G3_C1139`1<class BaseClass1>::Method7<object>() stelem.ref
    ldloc.s actualResults
    call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
    ret
  }
  .method static void M.G2_C191.T<T0,(class G2_C191`1<!!T0>)W>(!!W 'inst', string exp) cil managed
  {
    .maxstack 14
    .locals init (string[] actualResults)
    ldc.i4.s 9
    newarr string
    stloc.s actualResults
    ldarg.1
    ldstr "M.G2_C191.T<T0,(class G2_C191`1<!!T0>)W>(!!W 'inst', string exp)"
    ldc.i4.s 9
    ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::ClassMethod1348<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::ClassMethod1349<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::ClassMethod1709() stelem.ref
    ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::ClassMethod1710() stelem.ref
    ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::ClassMethod1711<object>() stelem.ref
    ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained.
!!W callvirt instance string class G2_C191`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<!!T0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C191.A<(class G2_C191`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 14 .locals init (string[] actualResults) ldc.i4.s 9 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C191.A<(class G2_C191`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 9 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G2_C191.B<(class G2_C191`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 14 .locals init (string[] actualResults) ldc.i4.s 9 newarr string stloc.s actualResults ldarg.1 ldstr "M.G2_C191.B<(class G2_C191`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 9 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::ClassMethod1709() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::ClassMethod1710() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::ClassMethod1711<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. 
!!W callvirt instance string class G2_C191`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 6 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 7 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 8 ldarga.s 0 constrained. !!W callvirt instance string class G2_C191`1<class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<!!T0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 8 .locals init (string[] actualResults) ldc.i4.s 3 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 3 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.T.T<T0,T1,(class G1_C13`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.T.T<T0,T1,(class G1_C13`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C13`2<!!T0,!!T1>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<!!T0,!!T1>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<!!T0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<!!T0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<!!T0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.A.T<T1,(class G1_C13`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.A.T<T1,(class G1_C13`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,!!T1>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,!!T1>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.A.A<(class G1_C13`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.A.A<(class G1_C13`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass0>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass0>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.A.B<(class G1_C13`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.A.B<(class G1_C13`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass1>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass1>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.B.T<T1,(class G1_C13`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.B.T<T1,(class G1_C13`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,!!T1>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,!!T1>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,!!T1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,!!T1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,!!T1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.B.A<(class G1_C13`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.B.A<(class G1_C13`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.G1_C13.B.B<(class G1_C13`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 11 .locals init (string[] actualResults) ldc.i4.s 6 newarr string stloc.s actualResults ldarg.1 ldstr "M.G1_C13.B.B<(class G1_C13`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 6 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass1>::ClassMethod1348<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass1>::ClassMethod1349<object>() stelem.ref ldloc.s actualResults ldc.i4.s 2 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass1>::Method4() stelem.ref ldloc.s actualResults ldc.i4.s 3 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass1>::Method5() stelem.ref ldloc.s actualResults ldc.i4.s 4 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass1>::Method6<object>() stelem.ref ldloc.s actualResults ldc.i4.s 5 ldarga.s 0 constrained. !!W callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1139`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C13::Method5.4873()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C13::Method4.4872()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G1_C13::Method4.4872()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G1_C13::Method5.MI.4874()" 
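// Each MethodCallTest(string,string,string) call below receives a triple of
// (actual result, expected identification string, dispatch-context description).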
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G3_C1139::Method4.MI.14541()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G3_C1139::Method5.14542()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.MI.14544<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method7<object>() ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on 
type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method5() ldstr "G3_C1139::Method5.14542()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method4() ldstr "G2_C191::Method4.6552()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod3676() ldstr "G3_C1139::ClassMethod3676.14546()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod3675() ldstr "G3_C1139::ClassMethod3675.14545()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::Method5() ldstr "G3_C1139::Method5.14542()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::Method4() ldstr "G3_C1139::Method4.14540()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class 
BaseClass0>::ClassMethod1711<object>() ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1710() ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1709() ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::Method7<object>() ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1349<object>() ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass0> callvirt instance string class G3_C1139`1<class BaseClass0>::ClassMethod1348<object>() ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop newobj instance void class G3_C1139`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() ldstr "G1_C13::Method5.4873()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() ldstr "G1_C13::Method4.4872()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G1_C13`2<class BaseClass1,class BaseClass0> callvirt instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass1>::Method4() ldstr "G1_C13::Method4.4872()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method5() ldstr "G1_C13::Method5.MI.4874()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup callvirt instance string class IBase1`1<class BaseClass0>::Method4() ldstr "G3_C1139::Method4.MI.14541()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt instance string class IBase1`1<class BaseClass0>::Method5() ldstr "G3_C1139::Method5.14542()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup callvirt 
instance string class IBase1`1<class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.MI.14544<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method7<object>() ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method5() ldstr "G3_C1139::Method5.14542()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::Method4() ldstr "G2_C191::Method4.6552()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G2_C191`1<class BaseClass0> callvirt instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc.0 dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod3676() ldstr "G3_C1139::ClassMethod3676.14546()" ldstr "class G3_C1139`1<class BaseClass1> on type class 
G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod3675() ldstr "G3_C1139::ClassMethod3675.14545()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::Method6<object>() ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::Method5() ldstr "G3_C1139::Method5.14542()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::Method4() ldstr "G3_C1139::Method4.14540()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1711<object>() ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1710() ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1709() ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::Method7<object>() ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1349<object>() ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup castclass class G3_C1139`1<class BaseClass1> callvirt instance string class G3_C1139`1<class BaseClass1>::ClassMethod1348<object>() ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr 
"========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1139`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.B<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.B<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void 
Generated667::M.IBase2.B.T<class BaseClass1,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.B.B<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.A<class G3_C1139`1<class BaseClass0>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.T<class BaseClass0,class G3_C1139`1<class BaseClass0>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.A<class G3_C1139`1<class BaseClass0>>(!!0,string) newobj instance void class G3_C1139`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G1_C13::Method4.4872()#G1_C13::Method5.4873()#G3_C1139::Method6.14543<System.Object>()#G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.G1_C13.B.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.B.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C13::Method4.4872()#G1_C13::Method5.MI.4874()#G3_C1139::Method6.14543<System.Object>()#" call void Generated667::M.IBase1.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G1_C13::Method7.4871<System.Object>()#" call void Generated667::M.IBase2.A.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.A.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!2,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.B.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C191::Method7.MI.6558<System.Object>()#" call void Generated667::M.IBase2.B.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G3_C1139::Method4.MI.14541()#G3_C1139::Method5.14542()#G3_C1139::Method6.MI.14544<System.Object>()#" call void Generated667::M.IBase1.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.T<class BaseClass0,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr 
"G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G2_C191::Method4.6552()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G2_C191.A<class G3_C1139`1<class BaseClass1>>(!!0,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.T<class BaseClass1,class G3_C1139`1<class BaseClass1>>(!!1,string) ldloc.0 ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()#G1_C13::ClassMethod1349.4877<System.Object>()#G3_C1139::ClassMethod1709.MI.14547()#G3_C1139::ClassMethod1710.MI.14548()#G2_C191::ClassMethod1711.6561<System.Object>()#G3_C1139::ClassMethod3675.14545()#G3_C1139::ClassMethod3676.14546()#G3_C1139::Method4.14540()#G3_C1139::Method5.14542()#G3_C1139::Method6.14543<System.Object>()#G2_C191::Method7.6557<System.Object>()#" call void Generated667::M.G3_C1139.B<class G3_C1139`1<class BaseClass1>>(!!0,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) newobj instance void class G3_C1139`1<class BaseClass0>::.ctor() stloc.0 ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" 
ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method5.4873()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method4.4872()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method4.4872()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method5.MI.4874()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class 
BaseClass0,class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method4.MI.14541()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method5.14542()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.MI.14544<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" 
call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method5.14542()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method4.6552()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod3676() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod3676.14546()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod3675() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod3675.14545()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method5.14542()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::Method4.14540()" ldstr "class G3_C1139`1<class 
BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass0>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G3_C1139`1<class BaseClass0> on type class G3_C1139`1<class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) newobj instance void class G3_C1139`1<class BaseClass1>::.ctor() stloc.0 ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class 
BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method5.4873()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method4.4872()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G1_C13`2<class BaseClass1,class BaseClass0> ldloc.0 ldvirtftn instance string class G1_C13`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class G1_C13`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method4.4872()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method5.MI.4874()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class IBase1`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::Method7.4871<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on 
type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.MI.6558<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method4.MI.14541()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method5.14542()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 ldloc.0 ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.MI.14544<System.Object>()" ldstr "class IBase1`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void 
[TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method5.14542()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method4.6552()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G2_C191`1<class BaseClass0> ldloc.0 ldvirtftn instance string class G2_C191`1<class BaseClass0>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G2_C191`1<class BaseClass0> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod3676() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod3676.14546()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod3675() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod3675.14545()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method6<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method6.14543<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method5() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method5.14542()" ldstr "class G3_C1139`1<class 
BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method4() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::Method4.14540()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1711<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1711.6561<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1710() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1710.MI.14548()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1709() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G3_C1139::ClassMethod1709.MI.14547()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::Method7<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::Method7.6557<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1349<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G1_C13::ClassMethod1349.4877<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc.0 castclass class G3_C1139`1<class BaseClass1> ldloc.0 ldvirtftn instance string class G3_C1139`1<class BaseClass1>::ClassMethod1348<object>() calli default string(class G3_C1139`1<class BaseClass1>) ldstr "G2_C191::ClassMethod1348.MI.6562<System.Object>()" ldstr "class G3_C1139`1<class BaseClass1> on type class G3_C1139`1<class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated667::MethodCallingTest() call void Generated667::ConstrainedCallsTest() call void Generated667::StructConstrainedInterfaceCallsTest() call void Generated667::CalliTest() ldc.i4 100 ret } }
-1
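The record above (the Generated667 IL) machine-checks virtual-slot resolution across the three-level generic hierarchy G1_C13`2 -> G2_C191`1 -> G3_C1139`1: each method is invoked through the defining class view, through the IBase1/IBase2 interface views, via constrained-call helpers, and via raw function pointers fetched with ldvirtftn and dispatched through calli, with the returned string compared against the expected override. As a loose hand-written C# analogue (hypothetical types and names of my own, not part of the generated suite), the same kind of check looks like this:

using System;

// Hypothetical miniature of the generated hierarchy: a generic base with
// virtual methods, a derived generic class overriding one of them, and an
// interface implemented along the way. None of these names come from the test.
interface IBase1<T>
{
    string Method4();
}

class G1<T, U> : IBase1<U>
{
    public virtual string Method4() => "G1::Method4";
    public virtual string Method7<M>() => "G1::Method7<" + typeof(M) + ">";
}

class G3<T> : G1<T, string>
{
    // Overrides a generic virtual method; dispatch through any base or
    // interface view must still land here.
    public override string Method7<M>() => "G3::Method7<" + typeof(M) + ">";
}

static class DispatchCheck
{
    static void Check(string actual, string expected) =>
        Console.WriteLine(actual == expected ? "PASS" : $"FAIL: {actual} != {expected}");

    static void Main()
    {
        object o = new G3<object>();

        // Through the base-class view: the G3 override must win.
        Check(((G1<object, string>)o).Method7<object>(), "G3::Method7<System.Object>");

        // Through the interface view: resolves to the inherited G1 slot.
        Check(((IBase1<string>)o).Method4(), "G1::Method4");
    }
}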
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/bgt_i8.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public _bgt { .field public static int64 min .field public static int64 _one .field public static int64 zero .field public static int64 one .field public static int64 max .field public static int64 odd .field public static int64 even .method public static void initialize() { .maxstack 10 ldc.i8 0x8000000000000000 stsfld int64 _bgt::min ldc.i8 0xFFFFFFFFFFFFFFFF stsfld int64 _bgt::_one ldc.i8 0x0000000000000000 stsfld int64 _bgt::zero ldc.i8 0x0000000000000001 stsfld int64 _bgt::one ldc.i8 0x7FFFFFFFFFFFFFFF stsfld int64 _bgt::max ldc.i8 0x5555555555555555 stsfld int64 _bgt::odd ldc.i8 0xAAAAAAAAAAAAAAAA stsfld int64 _bgt::even ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 10 call void _bgt::initialize() ldsfld int64 _bgt::min ldsfld int64 _bgt::min bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::_one bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::even bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::min bgt A br FAIL A: ldsfld int64 _bgt::_one ldsfld int64 _bgt::_one bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::even bgt _A br FAIL _A: ldsfld int64 _bgt::zero ldsfld int64 _bgt::min bgt B br FAIL B: ldsfld int64 _bgt::zero ldsfld int64 _bgt::_one bgt C br FAIL C: ldsfld int64 _bgt::zero ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::even bgt _C br FAIL _C: ldsfld int64 _bgt::one ldsfld int64 _bgt::min bgt D br FAIL D: ldsfld int64 _bgt::one ldsfld int64 _bgt::_one bgt E br FAIL E: ldsfld int64 _bgt::one ldsfld int64 _bgt::zero bgt F br FAIL F: ldsfld int64 _bgt::one ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::one ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::one ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::one ldsfld int64 _bgt::even bgt _F br FAIL _F: ldsfld int64 _bgt::max ldsfld int64 _bgt::min bgt G br FAIL G: ldsfld int64 _bgt::max ldsfld int64 _bgt::_one bgt H br FAIL H: ldsfld int64 _bgt::max ldsfld int64 _bgt::zero bgt I br FAIL I: ldsfld int64 _bgt::max ldsfld int64 _bgt::one bgt J br FAIL J: ldsfld int64 _bgt::max ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::max ldsfld int64 _bgt::odd bgt K br FAIL K: ldsfld int64 _bgt::max ldsfld int64 _bgt::even bgt L br FAIL L: ldsfld int64 _bgt::odd ldsfld int64 _bgt::min bgt M br FAIL M: ldsfld int64 _bgt::odd ldsfld int64 _bgt::_one bgt N br FAIL N: ldsfld int64 _bgt::odd ldsfld int64 _bgt::zero bgt O br FAIL O: ldsfld int64 _bgt::odd ldsfld int64 _bgt::one bgt P br FAIL P: ldsfld int64 _bgt::odd ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::odd ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::odd ldsfld int64 _bgt::even bgt Q br FAIL Q: ldsfld int64 _bgt::even ldsfld int64 _bgt::min bgt R br FAIL R: ldsfld int64 _bgt::even ldsfld 
int64 _bgt::_one bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::even bgt FAIL br BACKCHECK TOPASS: br PASS BACKCHECK: ldc.i8 0x1 ldc.i8 0x0 bgt TOPASS br FAIL PASS: ldc.i4 100 ret FAIL: ldc.i4 0x0 ret } } .assembly bgt_i8{}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .class public _bgt { .field public static int64 min .field public static int64 _one .field public static int64 zero .field public static int64 one .field public static int64 max .field public static int64 odd .field public static int64 even .method public static void initialize() { .maxstack 10 ldc.i8 0x8000000000000000 stsfld int64 _bgt::min ldc.i8 0xFFFFFFFFFFFFFFFF stsfld int64 _bgt::_one ldc.i8 0x0000000000000000 stsfld int64 _bgt::zero ldc.i8 0x0000000000000001 stsfld int64 _bgt::one ldc.i8 0x7FFFFFFFFFFFFFFF stsfld int64 _bgt::max ldc.i8 0x5555555555555555 stsfld int64 _bgt::odd ldc.i8 0xAAAAAAAAAAAAAAAA stsfld int64 _bgt::even ret } .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 10 call void _bgt::initialize() ldsfld int64 _bgt::min ldsfld int64 _bgt::min bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::_one bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::min ldsfld int64 _bgt::even bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::min bgt A br FAIL A: ldsfld int64 _bgt::_one ldsfld int64 _bgt::_one bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::_one ldsfld int64 _bgt::even bgt _A br FAIL _A: ldsfld int64 _bgt::zero ldsfld int64 _bgt::min bgt B br FAIL B: ldsfld int64 _bgt::zero ldsfld int64 _bgt::_one bgt C br FAIL C: ldsfld int64 _bgt::zero ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::zero ldsfld int64 _bgt::even bgt _C br FAIL _C: ldsfld int64 _bgt::one ldsfld int64 _bgt::min bgt D br FAIL D: ldsfld int64 _bgt::one ldsfld int64 _bgt::_one bgt E br FAIL E: ldsfld int64 _bgt::one ldsfld int64 _bgt::zero bgt F br FAIL F: ldsfld int64 _bgt::one ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::one ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::one ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::one ldsfld int64 _bgt::even bgt _F br FAIL _F: ldsfld int64 _bgt::max ldsfld int64 _bgt::min bgt G br FAIL G: ldsfld int64 _bgt::max ldsfld int64 _bgt::_one bgt H br FAIL H: ldsfld int64 _bgt::max ldsfld int64 _bgt::zero bgt I br FAIL I: ldsfld int64 _bgt::max ldsfld int64 _bgt::one bgt J br FAIL J: ldsfld int64 _bgt::max ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::max ldsfld int64 _bgt::odd bgt K br FAIL K: ldsfld int64 _bgt::max ldsfld int64 _bgt::even bgt L br FAIL L: ldsfld int64 _bgt::odd ldsfld int64 _bgt::min bgt M br FAIL M: ldsfld int64 _bgt::odd ldsfld int64 _bgt::_one bgt N br FAIL N: ldsfld int64 _bgt::odd ldsfld int64 _bgt::zero bgt O br FAIL O: ldsfld int64 _bgt::odd ldsfld int64 _bgt::one bgt P br FAIL P: ldsfld int64 _bgt::odd ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::odd ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::odd ldsfld int64 _bgt::even bgt Q br FAIL Q: ldsfld int64 _bgt::even ldsfld int64 _bgt::min bgt R br FAIL R: ldsfld int64 _bgt::even ldsfld 
int64 _bgt::_one bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::zero bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::one bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::max bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::odd bgt FAIL ldsfld int64 _bgt::even ldsfld int64 _bgt::even bgt FAIL br BACKCHECK TOPASS: br PASS BACKCHECK: ldc.i8 0x1 ldc.i8 0x0 bgt TOPASS br FAIL PASS: ldc.i4 100 ret FAIL: ldc.i4 0x0 ret } } .assembly bgt_i8{}
-1
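This record is an IL conformance test for the bgt opcode (branch on greater-than, signed comparison) over int64 boundary bit patterns: min 0x8000..., -1 (0xFFFF...), 0, 1, max 0x7FFF..., and the alternating patterns 0x5555... ("odd") and 0xAAAA... ("even"); every ordered pair is exercised, and the branch must be taken exactly when the signed comparison holds. A small C# sketch of my own (not from the test) showing the signed ordering the IL pins down:

using System;

static class BgtSketch
{
    static void Main()
    {
        // The same boundary bit patterns the IL test loads with ldc.i8,
        // reinterpreted as signed 64-bit values.
        long min  = unchecked((long)0x8000000000000000); // long.MinValue
        long mOne = unchecked((long)0xFFFFFFFFFFFFFFFF); // -1
        long zero = 0x0000000000000000;
        long one  = 0x0000000000000001;
        long max  = 0x7FFFFFFFFFFFFFFF;                  // long.MaxValue
        long odd  = 0x5555555555555555;                  // top bit clear: positive
        long even = unchecked((long)0xAAAAAAAAAAAAAAAA); // top bit set: negative

        // bgt performs a *signed* comparison, so the "even" pattern,
        // despite being the larger unsigned value, sorts below zero.
        Console.WriteLine(even > zero);  // False: 0xAAAA... is negative
        Console.WriteLine(odd  > even);  // True
        Console.WriteLine(min  > mOne);  // False: MinValue is the smallest
        Console.WriteLine(max  > odd);   // True
        Console.WriteLine(one  > zero);  // True: matches the test's final BACKCHECK
    }
}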
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/coreclr/tools/aot/ILCompiler.TypeSystem.Tests/CanonicalizationTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Internal.TypeSystem; using Xunit; namespace TypeSystemTests { public class CanonicalizationTests { private TestTypeSystemContext _context; private ModuleDesc _testModule; private MetadataType _referenceType; private MetadataType _otherReferenceType; private MetadataType _structType; private MetadataType _otherStructType; private MetadataType _genericReferenceType; private MetadataType _genericStructType; private MetadataType _genericReferenceTypeWithThreeParams; private MetadataType _genericStructTypeWithThreeParams; public CanonicalizationTests() { _context = new TestTypeSystemContext(TargetArchitecture.Unknown); var systemModule = _context.CreateModuleForSimpleName("CoreTestAssembly"); _context.SetSystemModule(systemModule); _testModule = systemModule; _referenceType = _testModule.GetType("Canonicalization", "ReferenceType"); _otherReferenceType = _testModule.GetType("Canonicalization", "OtherReferenceType"); _structType = _testModule.GetType("Canonicalization", "StructType"); _otherStructType = _testModule.GetType("Canonicalization", "OtherStructType"); _genericReferenceType = _testModule.GetType("Canonicalization", "GenericReferenceType`1"); _genericStructType = _testModule.GetType("Canonicalization", "GenericStructType`1"); _genericReferenceTypeWithThreeParams = _testModule.GetType("Canonicalization", "GenericReferenceTypeWithThreeParams`3"); _genericStructTypeWithThreeParams = _testModule.GetType("Canonicalization", "GenericStructTypeWithThreeParams`3"); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestGenericTypes(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; // Canonical forms of reference type over two different reference types are equivalent var referenceOverReference = _genericReferenceType.MakeInstantiatedType(_referenceType); var referenceOverOtherReference = _genericReferenceType.MakeInstantiatedType(_otherReferenceType); Assert.Same( referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Specific), referenceOverOtherReference.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Universal), referenceOverOtherReference.ConvertToCanonForm(CanonicalFormKind.Universal)); var referenceOverReferenceOverReference = _genericReferenceType.MakeInstantiatedType(referenceOverReference); Assert.Same( referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Specific), referenceOverReferenceOverReference.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Universal), referenceOverReferenceOverReference.ConvertToCanonForm(CanonicalFormKind.Universal)); var threeParamReferenceOverS1R1S1 = _genericReferenceTypeWithThreeParams.MakeInstantiatedType( _structType, _referenceType, _structType); var threeParamReferenceOverS1R2S1 = _genericReferenceTypeWithThreeParams.MakeInstantiatedType( _structType, _otherReferenceType, _structType); var threeParamReferenceOverS1R2S2 = _genericReferenceTypeWithThreeParams.MakeInstantiatedType( _structType, _otherReferenceType, _otherStructType); Assert.Same( threeParamReferenceOverS1R1S1.ConvertToCanonForm(CanonicalFormKind.Specific), threeParamReferenceOverS1R2S1.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( 
threeParamReferenceOverS1R1S1.ConvertToCanonForm(CanonicalFormKind.Universal), threeParamReferenceOverS1R2S1.ConvertToCanonForm(CanonicalFormKind.Universal)); Assert.Same( threeParamReferenceOverS1R1S1.ConvertToCanonForm(CanonicalFormKind.Universal), threeParamReferenceOverS1R2S2.ConvertToCanonForm(CanonicalFormKind.Universal)); // Universal canonical forms of reference type over reference and value types are equivalent var referenceOverStruct = _genericReferenceType.MakeInstantiatedType(_structType); var referenceOverOtherStruct = _genericReferenceType.MakeInstantiatedType(_otherStructType); Assert.Same( referenceOverStruct.ConvertToCanonForm(CanonicalFormKind.Universal), referenceOverOtherStruct.ConvertToCanonForm(CanonicalFormKind.Universal)); // Canon forms of reference type instantiated over a generic valuetype over any reference type var genericStructOverReference = _genericStructType.MakeInstantiatedType(_referenceType); var genericStructOverOtherReference = _genericStructType.MakeInstantiatedType(_otherReferenceType); var referenceOverGenericStructOverReference = _genericReferenceType.MakeInstantiatedType(genericStructOverReference); var referenceOverGenericStructOverOtherReference = _genericReferenceType.MakeInstantiatedType(genericStructOverOtherReference); Assert.Same( referenceOverGenericStructOverReference.ConvertToCanonForm(CanonicalFormKind.Specific), referenceOverGenericStructOverOtherReference.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.NotSame( referenceOverGenericStructOverReference.ConvertToCanonForm(CanonicalFormKind.Specific), referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( referenceOverGenericStructOverReference.ConvertToCanonForm(CanonicalFormKind.Universal), referenceOverGenericStructOverOtherReference.ConvertToCanonForm(CanonicalFormKind.Universal)); Assert.Same( referenceOverGenericStructOverReference.ConvertToCanonForm(CanonicalFormKind.Universal), referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Universal)); // Canon of a type instantiated over a signature variable is the same type when just canonicalizing as specific, // but the universal canon form when performing universal canonicalization. 
var genericStructOverSignatureVariable = _genericStructType.MakeInstantiatedType(_context.GetSignatureVariable(0, false)); Assert.Same( genericStructOverSignatureVariable, genericStructOverSignatureVariable.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.NotSame( genericStructOverSignatureVariable, genericStructOverSignatureVariable.ConvertToCanonForm(CanonicalFormKind.Universal)); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestGenericTypesNegative(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; // Two different types instantiated over the same type are not canonically equivalent var referenceOverReference = _genericReferenceType.MakeInstantiatedType(_referenceType); var structOverReference = _genericStructType.MakeInstantiatedType(_referenceType); Assert.NotSame( referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Specific), structOverReference.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.NotSame( referenceOverReference.ConvertToCanonForm(CanonicalFormKind.Universal), structOverReference.ConvertToCanonForm(CanonicalFormKind.Universal)); // Specific canonical forms of reference type over reference and value types are not equivalent var referenceOverStruct = _genericReferenceType.MakeInstantiatedType(_structType); var referenceOverOtherStruct = _genericReferenceType.MakeInstantiatedType(_otherStructType); Assert.NotSame( referenceOverStruct.ConvertToCanonForm(CanonicalFormKind.Specific), referenceOverOtherStruct.ConvertToCanonForm(CanonicalFormKind.Specific)); var threeParamReferenceOverS1R2S1 = _genericReferenceTypeWithThreeParams.MakeInstantiatedType( _structType, _otherReferenceType, _structType); var threeParamReferenceOverS1R2S2 = _genericReferenceTypeWithThreeParams.MakeInstantiatedType( _structType, _otherReferenceType, _otherStructType); Assert.NotSame( threeParamReferenceOverS1R2S1.ConvertToCanonForm(CanonicalFormKind.Specific), threeParamReferenceOverS1R2S2.ConvertToCanonForm(CanonicalFormKind.Specific)); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestArrayTypes(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; // Generic type instantiated over an array has the same canonical form as generic type over any other reference type var genericStructOverArrayOfInt = _genericStructType.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Int32).MakeArrayType()); var genericStructOverReferenceType = _genericStructType.MakeInstantiatedType(_referenceType); Assert.Same( genericStructOverArrayOfInt.ConvertToCanonForm(CanonicalFormKind.Specific), genericStructOverReferenceType.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( genericStructOverArrayOfInt.ConvertToCanonForm(CanonicalFormKind.Universal), genericStructOverReferenceType.ConvertToCanonForm(CanonicalFormKind.Universal)); // Canonical form of SzArray and Multidim array are not the same var arrayOfReferenceType = _referenceType.MakeArrayType(); var mdArrayOfReferenceType = _referenceType.MakeArrayType(1); Assert.NotSame( arrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Specific), mdArrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.NotSame( arrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Universal), mdArrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Universal)); // Canonical forms of arrays over different reference types are same var 
arrayOfOtherReferenceType = _otherReferenceType.MakeArrayType(); Assert.Same( arrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Specific), arrayOfOtherReferenceType.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( arrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Universal), arrayOfOtherReferenceType.ConvertToCanonForm(CanonicalFormKind.Universal)); // Canonical forms of arrays of value types are only same for universal canon form var arrayOfStruct = _structType.MakeArrayType(); Assert.NotSame( arrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Specific), arrayOfStruct.ConvertToCanonForm(CanonicalFormKind.Specific)); Assert.Same( arrayOfReferenceType.ConvertToCanonForm(CanonicalFormKind.Universal), arrayOfStruct.ConvertToCanonForm(CanonicalFormKind.Universal)); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestMethodsOnGenericTypes(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; var referenceOverReference = _genericReferenceType.MakeInstantiatedType(_referenceType); var referenceOverOtherReference = _genericReferenceType.MakeInstantiatedType(_otherReferenceType); Assert.NotSame( referenceOverReference.GetMethod("Method", null), referenceOverOtherReference.GetMethod("Method", null)); Assert.Same( referenceOverReference.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Specific), referenceOverOtherReference.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( referenceOverReference.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Universal), referenceOverOtherReference.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Universal)); var referenceOverStruct = _genericReferenceType.MakeInstantiatedType(_structType); Assert.NotSame( referenceOverReference.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Specific), referenceOverStruct.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( referenceOverReference.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Universal), referenceOverStruct.GetMethod("Method", null).GetCanonMethodTarget(CanonicalFormKind.Universal)); Assert.Same( referenceOverReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_referenceType).GetCanonMethodTarget(CanonicalFormKind.Specific), referenceOverOtherReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_otherReferenceType).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( referenceOverReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_referenceType).GetCanonMethodTarget(CanonicalFormKind.Universal), referenceOverOtherReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_otherReferenceType).GetCanonMethodTarget(CanonicalFormKind.Universal)); Assert.NotSame( referenceOverReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_referenceType).GetCanonMethodTarget(CanonicalFormKind.Specific), referenceOverOtherReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_structType).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( referenceOverReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_referenceType).GetCanonMethodTarget(CanonicalFormKind.Universal), referenceOverOtherReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_structType).GetCanonMethodTarget(CanonicalFormKind.Universal)); Assert.NotSame( 
referenceOverStruct.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_referenceType).GetCanonMethodTarget(CanonicalFormKind.Specific), referenceOverOtherReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_structType).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( referenceOverStruct.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_referenceType).GetCanonMethodTarget(CanonicalFormKind.Universal), referenceOverOtherReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_structType).GetCanonMethodTarget(CanonicalFormKind.Universal)); Assert.NotSame( referenceOverReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_structType), referenceOverReference.GetMethod("GenericMethod", null).MakeInstantiatedMethod(_structType).GetCanonMethodTarget(CanonicalFormKind.Specific)); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestArrayMethods(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; var arrayOfReferenceType = _referenceType.MakeArrayType(1); var arrayOfOtherReferenceType = _otherReferenceType.MakeArrayType(1); Assert.Same( arrayOfReferenceType.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Specific), arrayOfOtherReferenceType.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( arrayOfReferenceType.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Universal), arrayOfOtherReferenceType.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Universal)); var arrayOfStruct = _structType.MakeArrayType(1); Assert.NotSame( arrayOfReferenceType.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Specific), arrayOfStruct.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Specific)); Assert.Same( arrayOfReferenceType.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Universal), arrayOfStruct.GetMethod("Set", null).GetCanonMethodTarget(CanonicalFormKind.Universal)); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestUpgradeToUniversalCanon(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; var gstOverUniversalCanon = _genericStructType.MakeInstantiatedType(_context.UniversalCanonType); var grtOverRtRtStOverUniversal = _genericReferenceTypeWithThreeParams.MakeInstantiatedType( _referenceType, _referenceType, gstOverUniversalCanon); var grtOverRtRtStOverUniversalCanon = grtOverRtRtStOverUniversal.ConvertToCanonForm(CanonicalFormKind.Specific); // Specific form gets upgraded to universal in the presence of universal canon. 
// GenericReferenceTypeWithThreeParams<ReferenceType, ReferenceType, GenericStructType<__UniversalCanon>> is // GenericReferenceTypeWithThreeParams<T__UniversalCanon, U__UniversalCanon, V__UniversalCanon> Assert.Same(_context.UniversalCanonType, grtOverRtRtStOverUniversalCanon.Instantiation[0]); Assert.Same(_context.UniversalCanonType, grtOverRtRtStOverUniversalCanon.Instantiation[2]); } [Theory] [InlineData(CanonicalizationMode.Standard)] [InlineData(CanonicalizationMode.RuntimeDetermined)] public void TestDowngradeFromUniversalCanon(CanonicalizationMode algorithmType) { _context.CanonMode = algorithmType; var grtOverUniversalCanon = _genericReferenceType.MakeInstantiatedType(_context.UniversalCanonType); var gstOverGrtOverUniversalCanon = _genericStructType.MakeInstantiatedType(grtOverUniversalCanon); var gstOverCanon = _genericStructType.MakeInstantiatedType(_context.CanonType); Assert.Same(gstOverCanon, gstOverGrtOverUniversalCanon.ConvertToCanonForm(CanonicalFormKind.Specific)); var gstOverGstOverGrtOverUniversalCanon = _genericStructType.MakeInstantiatedType(gstOverGrtOverUniversalCanon); var gstOverGstOverCanon = _genericStructType.MakeInstantiatedType(gstOverCanon); Assert.Same(gstOverGstOverCanon, gstOverGstOverGrtOverUniversalCanon.ConvertToCanonForm(CanonicalFormKind.Specific)); } [Fact] public void TestCanonicalizationOfRuntimeDeterminedUniversalGeneric() { var gstOverUniversalCanon = _genericStructType.MakeInstantiatedType(_context.UniversalCanonType); var rdtUniversalCanon = (RuntimeDeterminedType)gstOverUniversalCanon.ConvertToSharedRuntimeDeterminedForm().Instantiation[0]; Assert.Same(_context.UniversalCanonType, rdtUniversalCanon.CanonicalType); var gstOverRdtUniversalCanon = _genericStructType.MakeInstantiatedType(rdtUniversalCanon); Assert.Same(gstOverUniversalCanon, gstOverRdtUniversalCanon.ConvertToCanonForm(CanonicalFormKind.Specific)); } } }
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
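For context on the query above: the PR adds a static Stopwatch.GetElapsedTime API so elapsed time can be computed from raw timestamps without allocating a Stopwatch instance. A minimal usage sketch follows (illustrative only; the sample class name is mine, not code from the PR):

using System;
using System.Diagnostics;

class ElapsedTimeSample
{
    static void Main()
    {
        // Capture a raw timestamp; no Stopwatch instance is allocated.
        long start = Stopwatch.GetTimestamp();

        // ... the work being timed ...

        // GetElapsedTime converts the timestamp delta into a TimeSpan.
        TimeSpan elapsed = Stopwatch.GetElapsedTime(start);
        Console.WriteLine($"took {elapsed.TotalMilliseconds} ms");
    }
}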
./src/tests/GC/Scenarios/GCSimulator/GCSimulator_295.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <GCStressIncompatible>true</GCStressIncompatible> <CLRTestExecutionArguments>-t 3 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 4 -f -dp 0.4 -dw 0.4</CLRTestExecutionArguments> <IsGCSimulatorTest>true</IsGCSimulatorTest> <CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="GCSimulator.cs" /> <Compile Include="lifetimefx.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <GCStressIncompatible>true</GCStressIncompatible> <CLRTestExecutionArguments>-t 3 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 4 -f -dp 0.4 -dw 0.4</CLRTestExecutionArguments> <IsGCSimulatorTest>true</IsGCSimulatorTest> <CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="GCSimulator.cs" /> <Compile Include="lifetimefx.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/mono/mono/tests/hash-table.cs
using System.Collections; namespace Test { public class Test { public static int Main () { string[] names = { "one", "two", "three", "four" }; Hashtable hash = new Hashtable (); for (int i=0; i < names.Length; ++i) { hash.Add (names [i], i); } if ((int)hash ["one"] != 0) return 1; if ((int)hash ["two"] != 1) return 2; if ((int)hash ["three"] != 2) return 3; if ((int)hash ["four"] != 3) return 4; if (hash.Contains("urka")) return 5; return 0; } } }
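For comparison, here is a minimal sketch (an illustration added here, not part of the Mono test) of the same checks using the generic Dictionary<string, int>, which avoids the boxing and the (int) casts that the non-generic Hashtable requires:

using System.Collections.Generic;

class DictionaryVariant
{
    public static int Main()
    {
        string[] names = { "one", "two", "three", "four" };
        var map = new Dictionary<string, int>();
        for (int i = 0; i < names.Length; ++i)
            map.Add(names[i], i);

        // Values come back strongly typed; no cast is needed.
        for (int i = 0; i < names.Length; ++i)
            if (map[names[i]] != i)
                return i + 1;

        return map.ContainsKey("urka") ? 5 : 0;
    }
}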
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/OrNot.Vector64.Int64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void OrNot_Vector64_Int64() { var test = new SimpleBinaryOpTest__OrNot_Vector64_Int64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__OrNot_Vector64_Int64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < 
sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int64> _fld1; public Vector64<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__OrNot_Vector64_Int64 testClass) { var result = AdvSimd.OrNot(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__OrNot_Vector64_Int64 testClass) { fixed (Vector64<Int64>* pFld1 = &_fld1) fixed (Vector64<Int64>* pFld2 = &_fld2) { var result = AdvSimd.OrNot( AdvSimd.LoadVector64((Int64*)(pFld1)), AdvSimd.LoadVector64((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector64<Int64> _clsVar1; private static Vector64<Int64> _clsVar2; private Vector64<Int64> _fld1; private Vector64<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__OrNot_Vector64_Int64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), 
(uint)Unsafe.SizeOf<Vector64<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>()); } public SimpleBinaryOpTest__OrNot_Vector64_Int64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.OrNot( Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.OrNot( AdvSimd.LoadVector64((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.OrNot), new Type[] { typeof(Vector64<Int64>), typeof(Vector64<Int64>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.OrNot), new Type[] { typeof(Vector64<Int64>), typeof(Vector64<Int64>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.OrNot( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int64>* pClsVar1 = &_clsVar1) fixed 
(Vector64<Int64>* pClsVar2 = &_clsVar2) { var result = AdvSimd.OrNot( AdvSimd.LoadVector64((Int64*)(pClsVar1)), AdvSimd.LoadVector64((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr); var result = AdvSimd.OrNot(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int64*)(_dataTable.inArray2Ptr)); var result = AdvSimd.OrNot(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__OrNot_Vector64_Int64(); var result = AdvSimd.OrNot(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__OrNot_Vector64_Int64(); fixed (Vector64<Int64>* pFld1 = &test._fld1) fixed (Vector64<Int64>* pFld2 = &test._fld2) { var result = AdvSimd.OrNot( AdvSimd.LoadVector64((Int64*)(pFld1)), AdvSimd.LoadVector64((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.OrNot(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int64>* pFld1 = &_fld1) fixed (Vector64<Int64>* pFld2 = &_fld2) { var result = AdvSimd.OrNot( AdvSimd.LoadVector64((Int64*)(pFld1)), AdvSimd.LoadVector64((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.OrNot(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.OrNot( AdvSimd.LoadVector64((Int64*)(&test._fld1)), AdvSimd.LoadVector64((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); 
test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int64> op1, Vector64<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.OrNot(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.OrNot)}<Int64>(Vector64<Int64>, Vector64<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
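For readers skimming the test above: AdvSimd.OrNot maps to the Arm ORN instruction and computes an element-wise (left | ~right). The ValidateResult loop checks each lane against Helpers.OrNot; that helper's source is not shown here, so the following scalar sketch is an assumed equivalent for Int64 lanes:

using System;

static class OrNotSketch
{
    // Assumed scalar equivalent of Helpers.OrNot for Int64 lanes:
    static long OrNot(long left, long right) => left | ~right;

    static void Main()
    {
        // Prints -5: 0b1010 | ~0b0110 = ...11111011 in two's complement.
        Console.WriteLine(OrNot(0b1010, 0b0110));
    }
}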
-1
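Aside on the AdvSimd.OrNot record above: every scenario funnels into ValidateResult, which compares the intrinsic's output against Helpers.OrNot element by element. On Arm, OrNot maps to the ORN instruction, i.e. the first operand ORed with the bitwise complement of the second. A minimal C# sketch of that reference semantics (the OrNotReference name and Main demo are illustrative, not part of the test library):

using System;

static class OrNotReference
{
    // Reference behavior the test validates against: OrNot(a, b) == a | ~b,
    // matching what Arm's ORN instruction computes per 64-bit lane.
    static long OrNot(long left, long right) => left | ~right;

    static void Main()
    {
        // 0b1010 | ~0b0110 sets every bit except bit 2: ...11111011 == -5.
        Console.WriteLine(OrNot(0b1010, 0b0110)); // -5
    }
}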
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Directed/coverage/importer/Desktop/ldfldstatic1_il_r.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="ldfldstatic1.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="ldfldstatic1.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Methodical/VT/port/lcs_gcref_do.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <GCStressIncompatible>true</GCStressIncompatible> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="lcs_gcref.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <GCStressIncompatible>true</GCStressIncompatible> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="lcs_gcref.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/jit64/hfa/main/testE/hfa_sd2E_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="hfa_testE.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\dll\common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f64_common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f64_interop_cpp.csproj" /> <CMakeProjectReference Include="..\dll\CMakelists.txt" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="hfa_testE.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\dll\common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f64_common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f64_interop_cpp.csproj" /> <CMakeProjectReference Include="..\dll\CMakelists.txt" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/AsyncServiceScopeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading.Tasks; using Xunit; namespace Microsoft.Extensions.DependencyInjection.Tests { public class AsyncServiceScopeTests { [Fact] public void ThrowsIfServiceScopeIsNull() { var exception = Assert.Throws<ArgumentNullException>(() => new AsyncServiceScope(null)); Assert.Equal("serviceScope", exception.ParamName); } [Fact] public void ReturnsServiceProviderFromWrappedScope() { var wrappedScope = new FakeSyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); Assert.Same(wrappedScope.ServiceProvider, asyncScope.ServiceProvider); } [Fact] public void CallsDisposeOnWrappedSyncScopeOnDispose() { var wrappedScope = new FakeSyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); asyncScope.Dispose(); Assert.True(wrappedScope.DisposeCalled); } [Fact] public async ValueTask CallsDisposeOnWrappedSyncScopeOnDisposeAsync() { var wrappedScope = new FakeSyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); await asyncScope.DisposeAsync(); Assert.True(wrappedScope.DisposeCalled); } [Fact] public void CallsDisposeOnWrappedAsyncScopeOnDispose() { var wrappedScope = new FakeAsyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); asyncScope.Dispose(); Assert.True(wrappedScope.DisposeCalled); Assert.False(wrappedScope.DisposeAsyncCalled); } [Fact] public async ValueTask CallsDisposeAsyncOnWrappedSyncScopeOnDisposeAsync() { var wrappedScope = new FakeAsyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); await asyncScope.DisposeAsync(); Assert.False(wrappedScope.DisposeCalled); Assert.True(wrappedScope.DisposeAsyncCalled); } public class FakeServiceProvider : IServiceProvider { public object? GetService(Type serviceType) => throw new NotImplementedException(); } public class FakeSyncServiceScope : IServiceScope { public FakeSyncServiceScope() { ServiceProvider = new FakeServiceProvider(); } public IServiceProvider ServiceProvider { get; } public bool DisposeCalled { get; private set; } public void Dispose() { DisposeCalled = true; } } public class FakeAsyncServiceScope : FakeSyncServiceScope, IAsyncDisposable { public FakeAsyncServiceScope() : base() { } public bool DisposeAsyncCalled { get; private set; } public ValueTask DisposeAsync() { DisposeAsyncCalled = true; return default; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading.Tasks; using Xunit; namespace Microsoft.Extensions.DependencyInjection.Tests { public class AsyncServiceScopeTests { [Fact] public void ThrowsIfServiceScopeIsNull() { var exception = Assert.Throws<ArgumentNullException>(() => new AsyncServiceScope(null)); Assert.Equal("serviceScope", exception.ParamName); } [Fact] public void ReturnsServiceProviderFromWrappedScope() { var wrappedScope = new FakeSyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); Assert.Same(wrappedScope.ServiceProvider, asyncScope.ServiceProvider); } [Fact] public void CallsDisposeOnWrappedSyncScopeOnDispose() { var wrappedScope = new FakeSyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); asyncScope.Dispose(); Assert.True(wrappedScope.DisposeCalled); } [Fact] public async ValueTask CallsDisposeOnWrappedSyncScopeOnDisposeAsync() { var wrappedScope = new FakeSyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); await asyncScope.DisposeAsync(); Assert.True(wrappedScope.DisposeCalled); } [Fact] public void CallsDisposeOnWrappedAsyncScopeOnDispose() { var wrappedScope = new FakeAsyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); asyncScope.Dispose(); Assert.True(wrappedScope.DisposeCalled); Assert.False(wrappedScope.DisposeAsyncCalled); } [Fact] public async ValueTask CallsDisposeAsyncOnWrappedSyncScopeOnDisposeAsync() { var wrappedScope = new FakeAsyncServiceScope(); var asyncScope = new AsyncServiceScope(wrappedScope); await asyncScope.DisposeAsync(); Assert.False(wrappedScope.DisposeCalled); Assert.True(wrappedScope.DisposeAsyncCalled); } public class FakeServiceProvider : IServiceProvider { public object? GetService(Type serviceType) => throw new NotImplementedException(); } public class FakeSyncServiceScope : IServiceScope { public FakeSyncServiceScope() { ServiceProvider = new FakeServiceProvider(); } public IServiceProvider ServiceProvider { get; } public bool DisposeCalled { get; private set; } public void Dispose() { DisposeCalled = true; } } public class FakeAsyncServiceScope : FakeSyncServiceScope, IAsyncDisposable { public FakeAsyncServiceScope() : base() { } public bool DisposeAsyncCalled { get; private set; } public ValueTask DisposeAsync() { DisposeAsyncCalled = true; return default; } } } }
-1
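Aside on the AsyncServiceScope record above: the forwarding behavior these tests pin down (Dispose always takes the wrapped scope's synchronous path, while DisposeAsync prefers IAsyncDisposable when the wrapped scope implements it) is what makes the struct safe to use directly in an await using block. A minimal usage sketch, assuming the CreateAsyncScope extension that returns this struct and a throwaway scoped registration:

using System.Threading.Tasks;
using Microsoft.Extensions.DependencyInjection;

class Demo
{
    static async Task Main()
    {
        ServiceProvider provider = new ServiceCollection()
            .AddScoped<object>() // placeholder scoped service, for illustration only
            .BuildServiceProvider();

        // AsyncServiceScope implements IAsyncDisposable, so scoped services are
        // torn down asynchronously when the block exits.
        await using (AsyncServiceScope scope = provider.CreateAsyncScope())
        {
            _ = scope.ServiceProvider.GetRequiredService<object>();
        }
    }
}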
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/tests/JIT/Regression/JitBlue/DevDiv_206786/handleMath.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Need to be careful about constant folding and handles // during prejitting. .assembly extern mscorlib {} .assembly handleMath {} .assembly extern xunit.core {} .class public F { .method public instance void .ctor(int32 a) { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ldarg.0 ldarg.1 ldc.i4 75 add stfld int32 F::A ret } .field public int32 A .method public int32 Fix() cil { ldarg.0 ldfld int32 F::A ldc.i4 125 add ret } .method public static native int Add(native int x) cil managed { ldarg.0 ldc.i4 5 add ret } .method public static native int Sub(native int x) cil managed noinlining { ldarg.0 ldc.i4 5 sub ret } .method public hidebysig static int32 Main(string[] args) cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint ldc.i4 -100 newobj instance void F::.ctor(int32) ldftn instance int32 F::Fix() call native int F::Add(native int) call native int F::Sub(native int) calli int32(class F) ldc.i4 100 bne.un failure ldc.i4 -100 newobj instance void F::.ctor(int32) ldftn instance int32 F::Fix() neg neg calli int32(class F) ldc.i4 100 bne.un failure ldc.i4 -100 newobj instance void F::.ctor(int32) ldftn instance int32 F::Fix() not not calli int32(class F) ldc.i4 100 bne.un failure success: ldc.i4 100 ret failure: ldc.i4.0 ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Need to be careful about constant folding and handles // during prejitting. .assembly extern mscorlib {} .assembly handleMath {} .assembly extern xunit.core {} .class public F { .method public instance void .ctor(int32 a) { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ldarg.0 ldarg.1 ldc.i4 75 add stfld int32 F::A ret } .field public int32 A .method public int32 Fix() cil { ldarg.0 ldfld int32 F::A ldc.i4 125 add ret } .method public static native int Add(native int x) cil managed { ldarg.0 ldc.i4 5 add ret } .method public static native int Sub(native int x) cil managed noinlining { ldarg.0 ldc.i4 5 sub ret } .method public hidebysig static int32 Main(string[] args) cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint ldc.i4 -100 newobj instance void F::.ctor(int32) ldftn instance int32 F::Fix() call native int F::Add(native int) call native int F::Sub(native int) calli int32(class F) ldc.i4 100 bne.un failure ldc.i4 -100 newobj instance void F::.ctor(int32) ldftn instance int32 F::Fix() neg neg calli int32(class F) ldc.i4 100 bne.un failure ldc.i4 -100 newobj instance void F::.ctor(int32) ldftn instance int32 F::Fix() not not calli int32(class F) ldc.i4 100 bne.un failure success: ldc.i4 100 ret failure: ldc.i4.0 ret } }
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/EXslt/out/datetime-day-in-year.xml
<out> <test1>185</test1> <test1>NaN</test1> </out>
<out> <test1>185</test1> <test1>NaN</test1> </out>
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/native/eventpipe/ep-stream.h
#ifndef __EVENTPIPE_STREAM_H__ #define __EVENTPIPE_STREAM_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_STREAM_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" // the enumeration has a specific set of values to keep it compatible with the consumer library // its sibling is defined in https://github.com/Microsoft/perfview/blob/10d1f92b242c98073b3817ac5ee6d98cd595d39b/src/FastSerialization/FastSerialization.cs#L2295 typedef enum { FAST_SERIALIZER_TAGS_ERROR = 0, // To improve debuggability, 0 is an illegal tag. FAST_SERIALIZER_TAGS_NULL_REFERENCE = 1, // Tag for a null object forwardReference. FAST_SERIALIZER_TAGS_OBJECT_REFERENCE = 2, // Followed by StreamLabel // 3 used to belong to ForwardReference, which got removed in V3 FAST_SERIALIZER_TAGS_BEGIN_OBJECT = 4, // Followed by Type object, object data, tagged EndObject FAST_SERIALIZER_TAGS_BEGIN_PRIVATE_OBJECT = 5, // Like beginObject, but not placed in the interning table on deserialization FAST_SERIALIZER_TAGS_END_OBJECT = 6, // Placed after an object to mark its end. // 7 used to belong to ForwardDefinition, which got removed in V3 FAST_SERIALIZER_TAGS_BYTE = 8, FAST_SERIALIZER_TAGS_INT16, FAST_SERIALIZER_TAGS_INT32, FAST_SERIALIZER_TAGS_INT64, FAST_SERIALIZER_TAGS_SKIP_REGION, FAST_SERIALIZER_TAGS_STRING, FAST_SERIALIZER_TAGS_BLOB, FAST_SERIALIZER_TAGS_LIMIT // Just past the last valid tag, used for asserts. } FastSerializerTags; /* * StreamWriter. */ typedef void (*StreamWriterFreeFunc)(void *stream); typedef bool (*StreamWriterWriteFunc)(void *stream, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written); struct _StreamWriterVtable { StreamWriterFreeFunc free_func; StreamWriterWriteFunc write_func; }; #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _StreamWriter { #else struct _StreamWriter_Internal { #endif StreamWriterVtable *vtable; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _StreamWriter { uint8_t _internal [sizeof (struct _StreamWriter_Internal)]; }; #endif StreamWriter * ep_stream_writer_init ( StreamWriter *stream_writer, StreamWriterVtable *vtable); void ep_stream_writer_fini (StreamWriter *stream_writer); bool ep_stream_writer_write ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written); void ep_stream_writer_free_vcall (StreamWriter *stream_writer); bool ep_stream_writer_write_vcall ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written); /* * FastSerializableObject. 
*/ typedef void (*FastSerializableObjectFreeFunc)(void *object); typedef void (*FastSerializableObjectFastSerializeFunc)(void *object, FastSerializer *fast_serializer); typedef const ep_char8_t * (*FastSerializableObjectGetTypeNameFunc)(void *object); struct _FastSerializableObjectVtable { FastSerializableObjectFreeFunc free_func; FastSerializableObjectFastSerializeFunc fast_serialize_func; FastSerializableObjectGetTypeNameFunc get_type_name_func; }; #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializableObject { #else struct _FastSerializableObject_Internal { #endif FastSerializableObjectVtable *vtable; int32_t object_version; int32_t min_reader_version; bool is_private; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializableObject { uint8_t _internal [sizeof (struct _FastSerializableObject_Internal)]; }; #endif EP_DEFINE_GETTER(FastSerializableObject *, fast_serializable_object, FastSerializableObjectVtable *, vtable) FastSerializableObject * ep_fast_serializable_object_init ( FastSerializableObject *fast_serializable_object, FastSerializableObjectVtable *vtable, int32_t object_version, int32_t min_reader_version, bool is_private); void ep_fast_serializable_object_fini (FastSerializableObject *fast_serializable_object); void ep_fast_serializable_object_fast_serialize ( FastSerializableObject *fast_serializable_object, FastSerializer *fast_serializer); const ep_char8_t * ep_fast_serializable_object_get_type_name (FastSerializableObject *fast_serializable_object); void ep_fast_serializable_object_free_vcall (FastSerializableObject *fast_serializable_object); const ep_char8_t * ep_fast_serializable_object_get_type_name_vcall (FastSerializableObject *fast_serializable_object); void ep_fast_serializable_object_fast_serialize_vcall ( FastSerializableObject *fast_serializable_object, FastSerializer *fast_serializer); /* * FastSerializer. */ #define FAST_SERIALIZER_ALIGNMENT_SIZE 4 #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializer { #else struct _FastSerializer_Internal { #endif StreamWriter *stream_writer; uint32_t required_padding; bool write_error_encountered; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializer { uint8_t _internal [sizeof (struct _FastSerializer_Internal)]; }; #endif EP_DEFINE_GETTER(FastSerializer *, fast_serializer, StreamWriter *, stream_writer) EP_DEFINE_GETTER(FastSerializer *, fast_serializer, uint32_t, required_padding) EP_DEFINE_GETTER(FastSerializer *, fast_serializer, bool, write_error_encountered) FastSerializer * ep_fast_serializer_alloc (StreamWriter *stream_writer); void ep_fast_serializer_free (FastSerializer *fast_serializer); void ep_fast_serializer_write_buffer ( FastSerializer *fast_serializer, const uint8_t *buffer, uint32_t buffer_len); void ep_fast_serializer_write_object ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_object); void ep_fast_serializer_write_string ( FastSerializer *fast_serializer, const ep_char8_t *contents, uint32_t contents_len); void ep_fast_serializer_write_tag ( FastSerializer *fast_serializer, FastSerializerTags tag, const uint8_t *payload, uint32_t payload_len); /* * FileStream. 
*/ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStream { #else struct _FileStream_Internal { #endif ep_rt_file_handle_t rt_file; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStream { uint8_t _internal [sizeof (struct _FileStream_Internal)]; }; #endif EP_DEFINE_GETTER(FileStream *, file_stream, ep_rt_file_handle_t, rt_file) EP_DEFINE_SETTER(FileStream *, file_stream, ep_rt_file_handle_t, rt_file) FileStream * ep_file_stream_alloc (void); void ep_file_stream_free (FileStream *file_stream); bool ep_file_stream_open_write ( FileStream *file_stream, const ep_char8_t *path); bool ep_file_stream_close (FileStream *file_stream); bool ep_file_stream_write ( FileStream *file_stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); /* * FileStreamWriter. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStreamWriter { #else struct _FileStreamWriter_Internal { #endif StreamWriter stream_writer; FileStream *file_stream; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStreamWriter { uint8_t _internal [sizeof (struct _FileStreamWriter_Internal)]; }; #endif EP_DEFINE_GETTER_REF(FileStreamWriter *, file_stream_writer, StreamWriter *, stream_writer) EP_DEFINE_GETTER(FileStreamWriter *, file_stream_writer, FileStream *, file_stream) FileStreamWriter * ep_file_stream_writer_alloc (const ep_char8_t *output_file_path); void ep_file_stream_writer_free (FileStreamWriter *file_stream_writer); bool ep_file_stream_writer_write ( FileStreamWriter *file_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); /* * IpcStreamWriter. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _IpcStreamWriter { #else struct _IpcStreamWriter_Internal { #endif StreamWriter stream_writer; IpcStream *ipc_stream; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _IpcStreamWriter { uint8_t _internal [sizeof (struct _IpcStreamWriter_Internal)]; }; #endif EP_DEFINE_GETTER_REF(IpcStreamWriter *, ipc_stream_writer, StreamWriter *, stream_writer) EP_DEFINE_GETTER(IpcStreamWriter *, ipc_stream_writer, IpcStream *, ipc_stream) IpcStreamWriter * ep_ipc_stream_writer_alloc ( uint64_t id, IpcStream *stream); void ep_ipc_stream_writer_free (IpcStreamWriter *ipc_stream_writer); bool ep_ipc_stream_writer_write ( IpcStreamWriter *ipc_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_STREAM_H__ */
#ifndef __EVENTPIPE_STREAM_H__ #define __EVENTPIPE_STREAM_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_STREAM_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" // the enumeration has a specific set of values to keep it compatible with the consumer library // its sibling is defined in https://github.com/Microsoft/perfview/blob/10d1f92b242c98073b3817ac5ee6d98cd595d39b/src/FastSerialization/FastSerialization.cs#L2295 typedef enum { FAST_SERIALIZER_TAGS_ERROR = 0, // To improve debuggability, 0 is an illegal tag. FAST_SERIALIZER_TAGS_NULL_REFERENCE = 1, // Tag for a null object forwardReference. FAST_SERIALIZER_TAGS_OBJECT_REFERENCE = 2, // Followed by StreamLabel // 3 used to belong to ForwardReference, which got removed in V3 FAST_SERIALIZER_TAGS_BEGIN_OBJECT = 4, // Followed by Type object, object data, tagged EndObject FAST_SERIALIZER_TAGS_BEGIN_PRIVATE_OBJECT = 5, // Like beginObject, but not placed in the interning table on deserialization FAST_SERIALIZER_TAGS_END_OBJECT = 6, // Placed after an object to mark its end. // 7 used to belong to ForwardDefinition, which got removed in V3 FAST_SERIALIZER_TAGS_BYTE = 8, FAST_SERIALIZER_TAGS_INT16, FAST_SERIALIZER_TAGS_INT32, FAST_SERIALIZER_TAGS_INT64, FAST_SERIALIZER_TAGS_SKIP_REGION, FAST_SERIALIZER_TAGS_STRING, FAST_SERIALIZER_TAGS_BLOB, FAST_SERIALIZER_TAGS_LIMIT // Just past the last valid tag, used for asserts. } FastSerializerTags; /* * StreamWriter. */ typedef void (*StreamWriterFreeFunc)(void *stream); typedef bool (*StreamWriterWriteFunc)(void *stream, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written); struct _StreamWriterVtable { StreamWriterFreeFunc free_func; StreamWriterWriteFunc write_func; }; #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _StreamWriter { #else struct _StreamWriter_Internal { #endif StreamWriterVtable *vtable; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _StreamWriter { uint8_t _internal [sizeof (struct _StreamWriter_Internal)]; }; #endif StreamWriter * ep_stream_writer_init ( StreamWriter *stream_writer, StreamWriterVtable *vtable); void ep_stream_writer_fini (StreamWriter *stream_writer); bool ep_stream_writer_write ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written); void ep_stream_writer_free_vcall (StreamWriter *stream_writer); bool ep_stream_writer_write_vcall ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written); /* * FastSerializableObject. 
*/ typedef void (*FastSerializableObjectFreeFunc)(void *object); typedef void (*FastSerializableObjectFastSerializeFunc)(void *object, FastSerializer *fast_serializer); typedef const ep_char8_t * (*FastSerializableObjectGetTypeNameFunc)(void *object); struct _FastSerializableObjectVtable { FastSerializableObjectFreeFunc free_func; FastSerializableObjectFastSerializeFunc fast_serialize_func; FastSerializableObjectGetTypeNameFunc get_type_name_func; }; #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializableObject { #else struct _FastSerializableObject_Internal { #endif FastSerializableObjectVtable *vtable; int32_t object_version; int32_t min_reader_version; bool is_private; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializableObject { uint8_t _internal [sizeof (struct _FastSerializableObject_Internal)]; }; #endif EP_DEFINE_GETTER(FastSerializableObject *, fast_serializable_object, FastSerializableObjectVtable *, vtable) FastSerializableObject * ep_fast_serializable_object_init ( FastSerializableObject *fast_serializable_object, FastSerializableObjectVtable *vtable, int32_t object_version, int32_t min_reader_version, bool is_private); void ep_fast_serializable_object_fini (FastSerializableObject *fast_serializable_object); void ep_fast_serializable_object_fast_serialize ( FastSerializableObject *fast_serializable_object, FastSerializer *fast_serializer); const ep_char8_t * ep_fast_serializable_object_get_type_name (FastSerializableObject *fast_serializable_object); void ep_fast_serializable_object_free_vcall (FastSerializableObject *fast_serializable_object); const ep_char8_t * ep_fast_serializable_object_get_type_name_vcall (FastSerializableObject *fast_serializable_object); void ep_fast_serializable_object_fast_serialize_vcall ( FastSerializableObject *fast_serializable_object, FastSerializer *fast_serializer); /* * FastSerializer. */ #define FAST_SERIALIZER_ALIGNMENT_SIZE 4 #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializer { #else struct _FastSerializer_Internal { #endif StreamWriter *stream_writer; uint32_t required_padding; bool write_error_encountered; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FastSerializer { uint8_t _internal [sizeof (struct _FastSerializer_Internal)]; }; #endif EP_DEFINE_GETTER(FastSerializer *, fast_serializer, StreamWriter *, stream_writer) EP_DEFINE_GETTER(FastSerializer *, fast_serializer, uint32_t, required_padding) EP_DEFINE_GETTER(FastSerializer *, fast_serializer, bool, write_error_encountered) FastSerializer * ep_fast_serializer_alloc (StreamWriter *stream_writer); void ep_fast_serializer_free (FastSerializer *fast_serializer); void ep_fast_serializer_write_buffer ( FastSerializer *fast_serializer, const uint8_t *buffer, uint32_t buffer_len); void ep_fast_serializer_write_object ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_object); void ep_fast_serializer_write_string ( FastSerializer *fast_serializer, const ep_char8_t *contents, uint32_t contents_len); void ep_fast_serializer_write_tag ( FastSerializer *fast_serializer, FastSerializerTags tag, const uint8_t *payload, uint32_t payload_len); /* * FileStream. 
*/ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStream { #else struct _FileStream_Internal { #endif ep_rt_file_handle_t rt_file; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStream { uint8_t _internal [sizeof (struct _FileStream_Internal)]; }; #endif EP_DEFINE_GETTER(FileStream *, file_stream, ep_rt_file_handle_t, rt_file) EP_DEFINE_SETTER(FileStream *, file_stream, ep_rt_file_handle_t, rt_file) FileStream * ep_file_stream_alloc (void); void ep_file_stream_free (FileStream *file_stream); bool ep_file_stream_open_write ( FileStream *file_stream, const ep_char8_t *path); bool ep_file_stream_close (FileStream *file_stream); bool ep_file_stream_write ( FileStream *file_stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); /* * FileStreamWriter. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStreamWriter { #else struct _FileStreamWriter_Internal { #endif StreamWriter stream_writer; FileStream *file_stream; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _FileStreamWriter { uint8_t _internal [sizeof (struct _FileStreamWriter_Internal)]; }; #endif EP_DEFINE_GETTER_REF(FileStreamWriter *, file_stream_writer, StreamWriter *, stream_writer) EP_DEFINE_GETTER(FileStreamWriter *, file_stream_writer, FileStream *, file_stream) FileStreamWriter * ep_file_stream_writer_alloc (const ep_char8_t *output_file_path); void ep_file_stream_writer_free (FileStreamWriter *file_stream_writer); bool ep_file_stream_writer_write ( FileStreamWriter *file_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); /* * IpcStreamWriter. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_STREAM_GETTER_SETTER) struct _IpcStreamWriter { #else struct _IpcStreamWriter_Internal { #endif StreamWriter stream_writer; IpcStream *ipc_stream; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_STREAM_GETTER_SETTER) struct _IpcStreamWriter { uint8_t _internal [sizeof (struct _IpcStreamWriter_Internal)]; }; #endif EP_DEFINE_GETTER_REF(IpcStreamWriter *, ipc_stream_writer, StreamWriter *, stream_writer) EP_DEFINE_GETTER(IpcStreamWriter *, ipc_stream_writer, IpcStream *, ipc_stream) IpcStreamWriter * ep_ipc_stream_writer_alloc ( uint64_t id, IpcStream *stream); void ep_ipc_stream_writer_free (IpcStreamWriter *ipc_stream_writer); bool ep_ipc_stream_writer_write ( IpcStreamWriter *ipc_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_STREAM_H__ */
-1
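Aside on the ep-stream.h record above: the header hand-rolls virtual dispatch in C. Every concrete writer embeds a StreamWriter whose StreamWriterVtable supplies free_func and write_func, and the *_vcall entry points dispatch through that table, which is why FileStreamWriter and IpcStreamWriter are interchangeable behind ep_stream_writer_write_vcall. A rough C# analogy of the same shape (all names below are illustrative; this is not the EventPipe API):

using System;
using System.IO;

// Plays the role of StreamWriterVtable plus the *_vcall dispatchers:
// one write contract, many concrete writers.
interface IStreamWriter : IDisposable
{
    bool Write(ReadOnlySpan<byte> buffer, out int bytesWritten);
}

// Analogue of FileStreamWriter: a file-backed implementation of the contract.
sealed class FileBackedWriter : IStreamWriter
{
    private readonly FileStream _file;

    public FileBackedWriter(string path) => _file = File.OpenWrite(path);

    public bool Write(ReadOnlySpan<byte> buffer, out int bytesWritten)
    {
        _file.Write(buffer); // FileStream.Write(ReadOnlySpan<byte>) overload
        bytesWritten = buffer.Length;
        return true;
    }

    public void Dispose() => _file.Dispose(); // counterpart of free_func
}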
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/System.Private.CoreLib/src/System/HashCode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* The xxHash32 implementation is based on the code published by Yann Collet: https://raw.githubusercontent.com/Cyan4973/xxHash/5c174cfa4e45a42f94082dc0d4539b39696afea1/xxhash.c xxHash - Fast Hash algorithm Copyright (C) 2012-2016, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - xxHash homepage: http://www.xxhash.com - xxHash source repository : https://github.com/Cyan4973/xxHash */ using System.Collections.Generic; using System.ComponentModel; using System.Numerics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; #pragma warning disable CA1066 // Implement IEquatable when overriding Object.Equals namespace System { // xxHash32 is used for the hash code. // https://github.com/Cyan4973/xxHash public struct HashCode { private static readonly uint s_seed = GenerateGlobalSeed(); private const uint Prime1 = 2654435761U; private const uint Prime2 = 2246822519U; private const uint Prime3 = 3266489917U; private const uint Prime4 = 668265263U; private const uint Prime5 = 374761393U; private uint _v1, _v2, _v3, _v4; private uint _queue1, _queue2, _queue3; private uint _length; private static unsafe uint GenerateGlobalSeed() { uint result; Interop.GetRandomBytes((byte*)&result, sizeof(uint)); return result; } public static int Combine<T1>(T1 value1) { // Provide a way of diffusing bits from something with a limited // input hash space. For example, many enums only have a few // possible hashes, only using the bottom few bits of the code. Some // collections are built on the assumption that hashes are spread // over a larger space, so diffusing the bits may help the // collection work more efficiently. uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hash = MixEmptyState(); hash += 4; hash = QueueRound(hash, hc1); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2>(T1 value1, T2 value2) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 
0); uint hash = MixEmptyState(); hash += 8; hash = QueueRound(hash, hc1); hash = QueueRound(hash, hc2); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3>(T1 value1, T2 value2, T3 value3) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hash = MixEmptyState(); hash += 12; hash = QueueRound(hash, hc1); hash = QueueRound(hash, hc2); hash = QueueRound(hash, hc3); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4>(T1 value1, T2 value2, T3 value3, T4 value4) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 16; hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 20; hash = QueueRound(hash, hc5); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5, T6>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 0); uint hc6 = (uint)(value6?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 24; hash = QueueRound(hash, hc5); hash = QueueRound(hash, hc6); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5, T6, T7>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 0); uint hc6 = (uint)(value6?.GetHashCode() ?? 0); uint hc7 = (uint)(value7?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 28; hash = QueueRound(hash, hc5); hash = QueueRound(hash, hc6); hash = QueueRound(hash, hc7); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5, T6, T7, T8>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7, T8 value8) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 
0); uint hc6 = (uint)(value6?.GetHashCode() ?? 0); uint hc7 = (uint)(value7?.GetHashCode() ?? 0); uint hc8 = (uint)(value8?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); v1 = Round(v1, hc5); v2 = Round(v2, hc6); v3 = Round(v3, hc7); v4 = Round(v4, hc8); uint hash = MixState(v1, v2, v3, v4); hash += 32; hash = MixFinal(hash); return (int)hash; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void Initialize(out uint v1, out uint v2, out uint v3, out uint v4) { v1 = s_seed + Prime1 + Prime2; v2 = s_seed + Prime2; v3 = s_seed; v4 = s_seed - Prime1; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint Round(uint hash, uint input) { return BitOperations.RotateLeft(hash + input * Prime2, 13) * Prime1; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint QueueRound(uint hash, uint queuedValue) { return BitOperations.RotateLeft(hash + queuedValue * Prime3, 17) * Prime4; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint MixState(uint v1, uint v2, uint v3, uint v4) { return BitOperations.RotateLeft(v1, 1) + BitOperations.RotateLeft(v2, 7) + BitOperations.RotateLeft(v3, 12) + BitOperations.RotateLeft(v4, 18); } private static uint MixEmptyState() { return s_seed + Prime5; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint MixFinal(uint hash) { hash ^= hash >> 15; hash *= Prime2; hash ^= hash >> 13; hash *= Prime3; hash ^= hash >> 16; return hash; } public void Add<T>(T value) { Add(value?.GetHashCode() ?? 0); } public void Add<T>(T value, IEqualityComparer<T>? comparer) { Add(value is null ? 0 : (comparer?.GetHashCode(value) ?? value.GetHashCode())); } /// <summary>Adds a span of bytes to the hash code.</summary> /// <param name="value">The span.</param> /// <remarks> /// This method does not guarantee that the result of adding a span of bytes will match /// the result of adding the same bytes individually. /// </remarks> public void AddBytes(ReadOnlySpan<byte> value) { ref byte pos = ref MemoryMarshal.GetReference(value); ref byte end = ref Unsafe.Add(ref pos, value.Length); // Add four bytes at a time until the input has fewer than four bytes remaining. while ((nint)Unsafe.ByteOffset(ref pos, ref end) >= sizeof(int)) { Add(Unsafe.ReadUnaligned<int>(ref pos)); pos = ref Unsafe.Add(ref pos, sizeof(int)); } // Add the remaining bytes a single byte at a time. while (Unsafe.IsAddressLessThan(ref pos, ref end)) { Add((int)pos); pos = ref Unsafe.Add(ref pos, 1); } } private void Add(int value) { // The original xxHash works as follows: // 0. Initialize immediately. We can't do this in a struct (no // default ctor). // 1. Accumulate blocks of length 16 (4 uints) into 4 accumulators. // 2. Accumulate remaining blocks of length 4 (1 uint) into the // hash. // 3. Accumulate remaining blocks of length 1 into the hash. // There is no need for #3 as this type only accepts ints. _queue1, // _queue2 and _queue3 are basically a buffer so that when // ToHashCode is called we can execute #2 correctly. // We need to initialize the xxHash32 state (_v1 to _v4) lazily (see // #0) and the last place that can be done if you look at the // original code is just before the first block of 16 bytes is mixed // in. The xxHash32 state is never used for streams containing fewer // than 16 bytes. // To see what's really going on here, have a look at the Combine // methods. 
uint val = (uint)value; // Storing the value of _length locally shaves off quite a few bytes // in the resulting machine code. uint previousLength = _length++; uint position = previousLength % 4; // Switch can't be inlined. if (position == 0) _queue1 = val; else if (position == 1) _queue2 = val; else if (position == 2) _queue3 = val; else // position == 3 { if (previousLength == 3) Initialize(out _v1, out _v2, out _v3, out _v4); _v1 = Round(_v1, _queue1); _v2 = Round(_v2, _queue2); _v3 = Round(_v3, _queue3); _v4 = Round(_v4, val); } } public int ToHashCode() { // Storing the value of _length locally shaves off quite a few bytes // in the resulting machine code. uint length = _length; // position refers to the *next* queue position in this method, so // position == 1 means that _queue1 is populated; _queue2 would have // been populated on the next call to Add. uint position = length % 4; // If the length is less than 4, _v1 to _v4 don't contain anything // yet. xxHash32 treats this differently. uint hash = length < 4 ? MixEmptyState() : MixState(_v1, _v2, _v3, _v4); // _length is incremented once per Add(Int32) and is therefore 4 // times too small (xxHash length is in bytes, not ints). hash += length * 4; // Mix what remains in the queue // Switch can't be inlined right now, so use as few branches as // possible by manually excluding impossible scenarios (position > 1 // is always false if position is not > 0). if (position > 0) { hash = QueueRound(hash, _queue1); if (position > 1) { hash = QueueRound(hash, _queue2); if (position > 2) hash = QueueRound(hash, _queue3); } } hash = MixFinal(hash); return (int)hash; } #pragma warning disable 0809 // Obsolete member 'memberA' overrides non-obsolete member 'memberB'. // Disallowing GetHashCode and Equals is by design // * We decided to not override GetHashCode() to produce the hash code // as this would be weird, both naming-wise as well as from a // behavioral standpoint (GetHashCode() should return the object's // hash code, not the one being computed). // * Even though ToHashCode() can be called safely multiple times on // this implementation, it is not part of the contract. If the // implementation has to change in the future we don't want to worry // about people who might have incorrectly used this type. [Obsolete("HashCode is a mutable struct and should not be compared with other HashCodes. Use ToHashCode to retrieve the computed hash code.", error: true)] [EditorBrowsable(EditorBrowsableState.Never)] public override int GetHashCode() => throw new NotSupportedException(SR.HashCode_HashCodeNotSupported); [Obsolete("HashCode is a mutable struct and should not be compared with other HashCodes.", error: true)] [EditorBrowsable(EditorBrowsableState.Never)] public override bool Equals(object? obj) => throw new NotSupportedException(SR.HashCode_EqualityNotSupported); #pragma warning restore 0809 } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* The xxHash32 implementation is based on the code published by Yann Collet: https://raw.githubusercontent.com/Cyan4973/xxHash/5c174cfa4e45a42f94082dc0d4539b39696afea1/xxhash.c xxHash - Fast Hash algorithm Copyright (C) 2012-2016, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - xxHash homepage: http://www.xxhash.com - xxHash source repository : https://github.com/Cyan4973/xxHash */ using System.Collections.Generic; using System.ComponentModel; using System.Numerics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; #pragma warning disable CA1066 // Implement IEquatable when overriding Object.Equals namespace System { // xxHash32 is used for the hash code. // https://github.com/Cyan4973/xxHash public struct HashCode { private static readonly uint s_seed = GenerateGlobalSeed(); private const uint Prime1 = 2654435761U; private const uint Prime2 = 2246822519U; private const uint Prime3 = 3266489917U; private const uint Prime4 = 668265263U; private const uint Prime5 = 374761393U; private uint _v1, _v2, _v3, _v4; private uint _queue1, _queue2, _queue3; private uint _length; private static unsafe uint GenerateGlobalSeed() { uint result; Interop.GetRandomBytes((byte*)&result, sizeof(uint)); return result; } public static int Combine<T1>(T1 value1) { // Provide a way of diffusing bits from something with a limited // input hash space. For example, many enums only have a few // possible hashes, only using the bottom few bits of the code. Some // collections are built on the assumption that hashes are spread // over a larger space, so diffusing the bits may help the // collection work more efficiently. uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hash = MixEmptyState(); hash += 4; hash = QueueRound(hash, hc1); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2>(T1 value1, T2 value2) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 
0); uint hash = MixEmptyState(); hash += 8; hash = QueueRound(hash, hc1); hash = QueueRound(hash, hc2); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3>(T1 value1, T2 value2, T3 value3) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hash = MixEmptyState(); hash += 12; hash = QueueRound(hash, hc1); hash = QueueRound(hash, hc2); hash = QueueRound(hash, hc3); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4>(T1 value1, T2 value2, T3 value3, T4 value4) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 16; hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 20; hash = QueueRound(hash, hc5); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5, T6>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 0); uint hc6 = (uint)(value6?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 24; hash = QueueRound(hash, hc5); hash = QueueRound(hash, hc6); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5, T6, T7>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 0); uint hc6 = (uint)(value6?.GetHashCode() ?? 0); uint hc7 = (uint)(value7?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); uint hash = MixState(v1, v2, v3, v4); hash += 28; hash = QueueRound(hash, hc5); hash = QueueRound(hash, hc6); hash = QueueRound(hash, hc7); hash = MixFinal(hash); return (int)hash; } public static int Combine<T1, T2, T3, T4, T5, T6, T7, T8>(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7, T8 value8) { uint hc1 = (uint)(value1?.GetHashCode() ?? 0); uint hc2 = (uint)(value2?.GetHashCode() ?? 0); uint hc3 = (uint)(value3?.GetHashCode() ?? 0); uint hc4 = (uint)(value4?.GetHashCode() ?? 0); uint hc5 = (uint)(value5?.GetHashCode() ?? 
0); uint hc6 = (uint)(value6?.GetHashCode() ?? 0); uint hc7 = (uint)(value7?.GetHashCode() ?? 0); uint hc8 = (uint)(value8?.GetHashCode() ?? 0); Initialize(out uint v1, out uint v2, out uint v3, out uint v4); v1 = Round(v1, hc1); v2 = Round(v2, hc2); v3 = Round(v3, hc3); v4 = Round(v4, hc4); v1 = Round(v1, hc5); v2 = Round(v2, hc6); v3 = Round(v3, hc7); v4 = Round(v4, hc8); uint hash = MixState(v1, v2, v3, v4); hash += 32; hash = MixFinal(hash); return (int)hash; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void Initialize(out uint v1, out uint v2, out uint v3, out uint v4) { v1 = s_seed + Prime1 + Prime2; v2 = s_seed + Prime2; v3 = s_seed; v4 = s_seed - Prime1; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint Round(uint hash, uint input) { return BitOperations.RotateLeft(hash + input * Prime2, 13) * Prime1; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint QueueRound(uint hash, uint queuedValue) { return BitOperations.RotateLeft(hash + queuedValue * Prime3, 17) * Prime4; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint MixState(uint v1, uint v2, uint v3, uint v4) { return BitOperations.RotateLeft(v1, 1) + BitOperations.RotateLeft(v2, 7) + BitOperations.RotateLeft(v3, 12) + BitOperations.RotateLeft(v4, 18); } private static uint MixEmptyState() { return s_seed + Prime5; } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint MixFinal(uint hash) { hash ^= hash >> 15; hash *= Prime2; hash ^= hash >> 13; hash *= Prime3; hash ^= hash >> 16; return hash; } public void Add<T>(T value) { Add(value?.GetHashCode() ?? 0); } public void Add<T>(T value, IEqualityComparer<T>? comparer) { Add(value is null ? 0 : (comparer?.GetHashCode(value) ?? value.GetHashCode())); } /// <summary>Adds a span of bytes to the hash code.</summary> /// <param name="value">The span.</param> /// <remarks> /// This method does not guarantee that the result of adding a span of bytes will match /// the result of adding the same bytes individually. /// </remarks> public void AddBytes(ReadOnlySpan<byte> value) { ref byte pos = ref MemoryMarshal.GetReference(value); ref byte end = ref Unsafe.Add(ref pos, value.Length); // Add four bytes at a time until the input has fewer than four bytes remaining. while ((nint)Unsafe.ByteOffset(ref pos, ref end) >= sizeof(int)) { Add(Unsafe.ReadUnaligned<int>(ref pos)); pos = ref Unsafe.Add(ref pos, sizeof(int)); } // Add the remaining bytes a single byte at a time. while (Unsafe.IsAddressLessThan(ref pos, ref end)) { Add((int)pos); pos = ref Unsafe.Add(ref pos, 1); } } private void Add(int value) { // The original xxHash works as follows: // 0. Initialize immediately. We can't do this in a struct (no // default ctor). // 1. Accumulate blocks of length 16 (4 uints) into 4 accumulators. // 2. Accumulate remaining blocks of length 4 (1 uint) into the // hash. // 3. Accumulate remaining blocks of length 1 into the hash. // There is no need for #3 as this type only accepts ints. _queue1, // _queue2 and _queue3 are basically a buffer so that when // ToHashCode is called we can execute #2 correctly. // We need to initialize the xxHash32 state (_v1 to _v4) lazily (see // #0) and the last place that can be done if you look at the // original code is just before the first block of 16 bytes is mixed // in. The xxHash32 state is never used for streams containing fewer // than 16 bytes. // To see what's really going on here, have a look at the Combine // methods. 
uint val = (uint)value; // Storing the value of _length locally shaves off quite a few bytes // in the resulting machine code. uint previousLength = _length++; uint position = previousLength % 4; // Switch can't be inlined. if (position == 0) _queue1 = val; else if (position == 1) _queue2 = val; else if (position == 2) _queue3 = val; else // position == 3 { if (previousLength == 3) Initialize(out _v1, out _v2, out _v3, out _v4); _v1 = Round(_v1, _queue1); _v2 = Round(_v2, _queue2); _v3 = Round(_v3, _queue3); _v4 = Round(_v4, val); } } public int ToHashCode() { // Storing the value of _length locally shaves off quite a few bytes // in the resulting machine code. uint length = _length; // position refers to the *next* queue position in this method, so // position == 1 means that _queue1 is populated; _queue2 would have // been populated on the next call to Add. uint position = length % 4; // If the length is less than 4, _v1 to _v4 don't contain anything // yet. xxHash32 treats this differently. uint hash = length < 4 ? MixEmptyState() : MixState(_v1, _v2, _v3, _v4); // _length is incremented once per Add(Int32) and is therefore 4 // times too small (xxHash length is in bytes, not ints). hash += length * 4; // Mix what remains in the queue // Switch can't be inlined right now, so use as few branches as // possible by manually excluding impossible scenarios (position > 1 // is always false if position is not > 0). if (position > 0) { hash = QueueRound(hash, _queue1); if (position > 1) { hash = QueueRound(hash, _queue2); if (position > 2) hash = QueueRound(hash, _queue3); } } hash = MixFinal(hash); return (int)hash; } #pragma warning disable 0809 // Obsolete member 'memberA' overrides non-obsolete member 'memberB'. // Disallowing GetHashCode and Equals is by design // * We decided to not override GetHashCode() to produce the hash code // as this would be weird, both naming-wise as well as from a // behavioral standpoint (GetHashCode() should return the object's // hash code, not the one being computed). // * Even though ToHashCode() can be called safely multiple times on // this implementation, it is not part of the contract. If the // implementation has to change in the future we don't want to worry // about people who might have incorrectly used this type. [Obsolete("HashCode is a mutable struct and should not be compared with other HashCodes. Use ToHashCode to retrieve the computed hash code.", error: true)] [EditorBrowsable(EditorBrowsableState.Never)] public override int GetHashCode() => throw new NotSupportedException(SR.HashCode_HashCodeNotSupported); [Obsolete("HashCode is a mutable struct and should not be compared with other HashCodes.", error: true)] [EditorBrowsable(EditorBrowsableState.Never)] public override bool Equals(object? obj) => throw new NotSupportedException(SR.HashCode_EqualityNotSupported); #pragma warning restore 0809 } }
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
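For context, a short sketch of the usage pattern the new API enables; DoWork is a hypothetical placeholder for the code being timed:

// Sketch of the timestamp-based pattern Stopwatch.GetElapsedTime supports,
// which avoids allocating a Stopwatch instance.
using System;
using System.Diagnostics;

long start = Stopwatch.GetTimestamp();
DoWork(); // hypothetical workload
TimeSpan elapsed = Stopwatch.GetElapsedTime(start);
Console.WriteLine($"Took {elapsed.TotalMilliseconds:F2} ms");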
./src/coreclr/nativeaot/BuildIntegration/findvcvarsall.bat
@ECHO OFF SETLOCAL IF "%~1"=="" ( ECHO Usage: %~nx0 ^<arch^> GOTO :ERROR ) SET vswherePath=%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe IF NOT EXIST "%vswherePath%" GOTO :ERROR SET toolsSuffix=x86.x64 IF /I "%~1"=="arm64" SET toolsSuffix=ARM64 FOR /F "tokens=*" %%i IN ( '"%vswherePath%" -latest -prerelease -products * ^ -requires Microsoft.VisualStudio.Component.VC.Tools.%toolsSuffix% ^ -version [16^,18^) ^ -property installationPath' ) DO SET vsBase=%%i IF "%vsBase%"=="" GOTO :ERROR SET procArch=%PROCESSOR_ARCHITEW6432% IF "%procArch%"=="" SET procArch=%PROCESSOR_ARCHITECTURE% SET vcEnvironment=%~1 IF /I "%~1"=="x64" ( SET vcEnvironment=x86_amd64 IF /I "%procArch%"=="AMD64" SET vcEnvironment=amd64 ) IF /I "%~1"=="arm64" ( SET vcEnvironment=x86_arm64 IF /I "%procArch%"=="AMD64" SET vcEnvironment=amd64_arm64 ) CALL "%vsBase%\vc\Auxiliary\Build\vcvarsall.bat" %vcEnvironment% > NUL FOR /F "delims=" %%W IN ('where link') DO ( FOR %%A IN ("%%W") DO ECHO %%~dpA# GOTO :CAPTURE_LIB_PATHS ) GOTO :ERROR :CAPTURE_LIB_PATHS IF "%LIB%"=="" GOTO :ERROR ECHO %LIB% ENDLOCAL EXIT /B 0 :ERROR EXIT /B 1
@ECHO OFF SETLOCAL IF "%~1"=="" ( ECHO Usage: %~nx0 ^<arch^> GOTO :ERROR ) SET vswherePath=%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe IF NOT EXIST "%vswherePath%" GOTO :ERROR SET toolsSuffix=x86.x64 IF /I "%~1"=="arm64" SET toolsSuffix=ARM64 FOR /F "tokens=*" %%i IN ( '"%vswherePath%" -latest -prerelease -products * ^ -requires Microsoft.VisualStudio.Component.VC.Tools.%toolsSuffix% ^ -version [16^,18^) ^ -property installationPath' ) DO SET vsBase=%%i IF "%vsBase%"=="" GOTO :ERROR SET procArch=%PROCESSOR_ARCHITEW6432% IF "%procArch%"=="" SET procArch=%PROCESSOR_ARCHITECTURE% SET vcEnvironment=%~1 IF /I "%~1"=="x64" ( SET vcEnvironment=x86_amd64 IF /I "%procArch%"=="AMD64" SET vcEnvironment=amd64 ) IF /I "%~1"=="arm64" ( SET vcEnvironment=x86_arm64 IF /I "%procArch%"=="AMD64" SET vcEnvironment=amd64_arm64 ) CALL "%vsBase%\vc\Auxiliary\Build\vcvarsall.bat" %vcEnvironment% > NUL FOR /F "delims=" %%W IN ('where link') DO ( FOR %%A IN ("%%W") DO ECHO %%~dpA# GOTO :CAPTURE_LIB_PATHS ) GOTO :ERROR :CAPTURE_LIB_PATHS IF "%LIB%"=="" GOTO :ERROR ECHO %LIB% ENDLOCAL EXIT /B 0 :ERROR EXIT /B 1
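To illustrate how a caller might consume this script's two outputs (the linker directory terminated by '#', then the LIB list), here is a hedged C# sketch; the invocation and parsing are illustrative rather than the actual build integration:

// Illustrative only: run findvcvarsall.bat and split its output at the '#' marker.
using System.Diagnostics;

var psi = new ProcessStartInfo("cmd.exe", "/c findvcvarsall.bat x64")
{
    RedirectStandardOutput = true,
    UseShellExecute = false,
};
using var proc = Process.Start(psi)!;
string output = proc.StandardOutput.ReadToEnd();
proc.WaitForExit();

int marker = output.IndexOf('#');
string linkDir = output.Substring(0, marker).Trim();   // directory containing link.exe
string libPaths = output.Substring(marker + 1).Trim(); // contents of %LIB%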
-1
dotnet/runtime
66,372
Add Stopwatch.GetElapsedTime
Fixes https://github.com/dotnet/runtime/issues/65858
stephentoub
2022-03-09T01:52:28Z
2022-03-09T12:42:15Z
ca731545a58307870a0baebb0ee43eeea61f175f
c9f7f7389e8e9a00d501aef696333b67d218baac
Add Stopwatch.GetElapsedTime. Fixes https://github.com/dotnet/runtime/issues/65858
./src/libraries/Microsoft.Extensions.DependencyModel/tests/DependencyContextPathsTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using FluentAssertions; using Xunit; namespace Microsoft.Extensions.DependencyModel.Tests { public class DependencyContextPathsTests { [Fact] public void CreateWithNulls() { var paths = DependencyContextPaths.Create(null, null); paths.Application.Should().BeNull(); paths.SharedRuntime.Should().BeNull(); paths.NonApplicationPaths.Should().BeEmpty(); } [Fact] public void CreateWithNullFxDeps() { var paths = DependencyContextPaths.Create("foo.deps.json", null); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().BeNull(); paths.NonApplicationPaths.Should().BeEmpty(); } [Fact] public void CreateWithDepsFilesContainingFxDeps() { var paths = DependencyContextPaths.Create("foo.deps.json;fx.deps.json", "fx.deps.json"); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().Be("fx.deps.json"); paths.NonApplicationPaths.Should().BeEquivalentTo("fx.deps.json"); } [Fact] public void CreateWithExtraContainingFxDeps() { var paths = DependencyContextPaths.Create( "foo.deps.json;fx.deps.json;extra.deps.json;extra2.deps.json", "fx.deps.json"); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().Be("fx.deps.json"); paths.NonApplicationPaths.Should().BeEquivalentTo("fx.deps.json", "extra.deps.json", "extra2.deps.json"); } [Fact] public void CreateWithExtraNotContainingFxDeps() { var paths = DependencyContextPaths.Create( "foo.deps.json;extra.deps.json;extra2.deps.json", "fx.deps.json"); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().Be("fx.deps.json"); paths.NonApplicationPaths.Should().BeEquivalentTo("extra.deps.json", "extra2.deps.json"); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using FluentAssertions; using Xunit; namespace Microsoft.Extensions.DependencyModel.Tests { public class DependencyContextPathsTests { [Fact] public void CreateWithNulls() { var paths = DependencyContextPaths.Create(null, null); paths.Application.Should().BeNull(); paths.SharedRuntime.Should().BeNull(); paths.NonApplicationPaths.Should().BeEmpty(); } [Fact] public void CreateWithNullFxDeps() { var paths = DependencyContextPaths.Create("foo.deps.json", null); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().BeNull(); paths.NonApplicationPaths.Should().BeEmpty(); } [Fact] public void CreateWithDepsFilesContainingFxDeps() { var paths = DependencyContextPaths.Create("foo.deps.json;fx.deps.json", "fx.deps.json"); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().Be("fx.deps.json"); paths.NonApplicationPaths.Should().BeEquivalentTo("fx.deps.json"); } [Fact] public void CreateWithExtraContainingFxDeps() { var paths = DependencyContextPaths.Create( "foo.deps.json;fx.deps.json;extra.deps.json;extra2.deps.json", "fx.deps.json"); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().Be("fx.deps.json"); paths.NonApplicationPaths.Should().BeEquivalentTo("fx.deps.json", "extra.deps.json", "extra2.deps.json"); } [Fact] public void CreateWithExtraNotContainingFxDeps() { var paths = DependencyContextPaths.Create( "foo.deps.json;extra.deps.json;extra2.deps.json", "fx.deps.json"); paths.Application.Should().Be("foo.deps.json"); paths.SharedRuntime.Should().Be("fx.deps.json"); paths.NonApplicationPaths.Should().BeEquivalentTo("extra.deps.json", "extra2.deps.json"); } } }
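The tests above pin down the parsing contract of DependencyContextPaths.Create; as a reading aid, here is a minimal re-implementation sketch of that contract (illustrative only, not the shipped code):

// Sketch: the first ';'-separated entry is the application deps file, the rest
// are non-application paths, and the shared runtime is whatever fxDeps names.
using System;
using System.Linq;

static (string? App, string? SharedRuntime, string[] NonApplicationPaths) Create(
    string? depsFiles, string? fxDeps)
{
    string[] files = depsFiles?.Split(';', StringSplitOptions.RemoveEmptyEntries)
                     ?? Array.Empty<string>();
    string? app = files.Length > 0 ? files[0] : null;
    return (app, fxDeps, files.Skip(1).ToArray());
}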
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
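The description above concerns how early stack offsets are aligned for ARM32 parameters; purely as an illustration of the round-up arithmetic involved (not the JIT's actual code):

// Illustrative only: rounding a stack offset up to an argument's alignment,
// e.g. an 8-byte-aligned double that follows a single 4-byte slot on ARM32.
static uint RoundUp(uint offset, uint alignment) =>
    (offset + alignment - 1) & ~(alignment - 1); // alignment must be a power of two

// RoundUp(4, 8) == 8: the double starts at offset 8, leaving a 4-byte hole.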
./src/coreclr/jit/compiler.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not single def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it is promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative; fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function.
unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candidate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register // candidacy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target.
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. 
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this a SIMD struct which is used for a SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd is not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal.
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i.e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp().
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use whatever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread.
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that corresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if it is using stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
public:
    unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg

    var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
                       // struct is passed as a scalar type, this is that type.
                       // Note that if a struct is passed by reference, this will still be the struct type.

    bool needTmp : 1;      // True when we force this argument's evaluation into a temp LclVar
    bool needPlace : 1;    // True when we must replace this argument with a placeholder node
    bool isTmp : 1;        // True when we set up a temp LclVar for this argument due to size issues with the struct
    bool processed : 1;    // True when we have decided the evaluation order for this argument in the gtCallLateArgs
    bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
                           // previous arguments.

    NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
                                               // to be in certain registers or on the stack, regardless of where they
                                               // appear in the arg list.

    bool isStruct : 1;    // True if this is a struct arg
    bool _isVararg : 1;   // True if the argument is in a vararg context.
    bool passedByRef : 1; // True iff the argument is passed by reference.

#if FEATURE_ARG_SPLIT
    bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif                 // FEATURE_ARG_SPLIT

#ifdef FEATURE_HFA_FIELDS_PRESENT
    CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif

    CorInfoHFAElemType GetHfaElemKind() const
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        return _hfaElemKind;
#else
        NOWAY_MSG("GetHfaElemKind");
        return CORINFO_HFA_ELEM_NONE;
#endif
    }

    void SetHfaElemKind(CorInfoHFAElemType elemKind)
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        _hfaElemKind = elemKind;
#else
        NOWAY_MSG("SetHfaElemKind");
#endif
    }

    bool isNonStandard() const
    {
        return nonStandardArgKind != NonStandardArgKind::None;
    }

    // Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
    // In this case, it must be removed by GenTreeCall::ResetArgInfo.
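    // (Editor's note, an illustrative reading of the switch below: an R2R
    // indirection cell argument is appended by fgInitArgInfo itself, so it is
    // reported as added late and must be stripped again before the arg info is
    // rebuilt; a PInvoke frame argument, by contrast, already exists in the arg
    // list beforehand and is left alone.)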
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
                            // Computed dynamically during codegen, based on stkSizeBytes and the current
                            // stack level (genStackLevel) when the first stack adjustment is made for
                            // this call.
#endif

#if FEATURE_FIXED_OUT_ARGS
    unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif

    unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
    bool     hasRegArgs;   // true if we have one or more register arguments
    bool     hasStackArgs; // true if we have one or more stack arguments
    bool     argsComplete; // marker for state
    bool     argsSorted;   // marker for state
    bool     needsTemps;   // one or more arguments must be copied to a temp by EvalArgsToTemps

    fgArgTabEntry** argTable; // variable-sized array of per-argument descriptions (i.e. argTable[argTableSize])

private:
    void AddArg(fgArgTabEntry* curArgTabEntry);

public:
    fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
    fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);

    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

#ifdef UNIX_AMD64_ABI
    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             const bool        isStruct,
                             const bool        isFloatHfa,
                             const bool        isVararg,
                             const regNumber   otherRegNum,
                             const unsigned    structIntRegs,
                             const unsigned    structFloatRegs,
                             const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI

    fgArgTabEntry* AddStkArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             unsigned          numSlots,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

    void RemorphReset();
    void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
    void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);

    void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);

    void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);

    void ArgsComplete();
    void SortArgs();
    void EvalArgsToTemps();

    unsigned ArgCount() const
    {
        return argCount;
    }
    fgArgTabEntry** ArgTable() const
    {
        return argTable;
    }

#if defined(DEBUG_ARG_SLOTS)
    unsigned GetNextSlotNum() const
    {
        return nextSlotNum;
    }
#endif

    unsigned GetNextSlotByteOffset() const
    {
        return nextStackByteOffset;
    }
    bool HasRegArgs() const
    {
        return hasRegArgs;
    }
    bool NeedsTemps() const
    {
        return needsTemps;
    }
    bool HasStackArgs() const
    {
        return hasStackArgs;
    }
    bool AreArgsComplete() const
    {
        return argsComplete;
    }

#if FEATURE_FIXED_OUT_ARGS
    unsigned GetOutArgSize() const
    {
        return outArgSize;
    }
    void SetOutArgSize(unsigned newVal)
    {
        outArgSize = newVal;
    }
#endif // FEATURE_FIXED_OUT_ARGS

#if defined(UNIX_X86_ABI)
    void ComputeStackAlignment(unsigned curStackLevelInBytes)
    {
        padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
    }

    unsigned GetStkAlign() const
    {
        return padStkAlign;
    }
    void SetStkSizeBytes(unsigned newStkSizeBytes)
    {
        stkSizeBytes = newStkSizeBytes;
    }
    unsigned GetStkSizeBytes() const
    {
        return stkSizeBytes;
    }
    bool IsStkAlignmentDone() const
    {
        return alignmentDone;
    }
    void SetStkAlignmentDone()
    {
        alignmentDone = true;
    }
#endif // defined(UNIX_X86_ABI)

    // Get the fgArgTabEntry for the arg at position argNum.
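    // Illustrative usage (editor's sketch; assumes `call` is a GenTreeCall* whose
    // fgArgInfo has already been built by fgMorphArgs):
    //
    //     fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(0, /* reMorphing */ false);
    //     GenTree*       node  = entry->GetNode();
    //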
    fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const
    {
        fgArgTabEntry* curArgTabEntry = nullptr;

        if (!reMorphing)
        {
            // The arg table has not yet been sorted.
            curArgTabEntry = argTable[argNum];
            assert(curArgTabEntry->argNum == argNum);
            return curArgTabEntry;
        }

        for (unsigned i = 0; i < argCount; i++)
        {
            curArgTabEntry = argTable[i];
            if (curArgTabEntry->argNum == argNum)
            {
                return curArgTabEntry;
            }
        }
        noway_assert(!"GetArgEntry: argNum not found");
        return nullptr;
    }

    void SetNeedsTemps()
    {
        needsTemps = true;
    }

    // Get the node for the arg at position argIndex.
    // Caller must ensure that this index is a valid arg index.
    GenTree* GetArgNode(unsigned argIndex) const
    {
        return GetArgEntry(argIndex)->GetNode();
    }

    void Dump(Compiler* compiler) const;
};

#ifdef DEBUG
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// We have the ability to mark source expressions with "Test Labels."
// These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions
// that should be CSE defs, and other expressions that should be uses of those defs, with a shared label.

enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
{
    TL_SsaName,
    TL_VN,        // Defines a "VN equivalence class". (For full VN, including exceptions thrown).
    TL_VNNorm,    // Like above, but uses the non-exceptional value of the expression.
    TL_CSE_Def,   // This must be identified in the JIT as a CSE def
    TL_CSE_Use,   // This must be identified in the JIT as a CSE use
    TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop.
};

struct TestLabelAndNum
{
    TestLabel m_tl;
    ssize_t   m_num;

    TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0)
    {
    }
};

typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap;

// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG

//-------------------------------------------------------------------------
// LoopFlags: flags for the loop table.
//
enum LoopFlags : unsigned short
{
    LPFLG_EMPTY = 0,

    // LPFLG_UNUSED  = 0x0001,
    // LPFLG_UNUSED  = 0x0002,
    LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++)
    // LPFLG_UNUSED  = 0x0008,

    LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call

    LPFLG_VAR_INIT   = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit)
    LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit)

    LPFLG_SIMD_LIMIT   = 0x0080, // iterator is compared with vector element count (found in lpConstLimit)
    LPFLG_VAR_LIMIT    = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit)
    LPFLG_CONST_LIMIT  = 0x0200, // iterator is compared with a constant (found in lpConstLimit)
    LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)

    LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop
    LPFLG_REMOVED     = 0x1000, // has been removed from the loop table (unrolled or optimized away)
    LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop

    LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed
    LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
                                // type are assigned to.
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.

    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree* op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //
    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned  GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned  GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating format of EH table
    //

    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // the next available EHNodeDsc in the preallocated block of EHnodes.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET   ehnEndOffset;   // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this
                                    // to the last IL offset, not "one past the last one", i.e., the range Start to End
                                    // is inclusive).
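        // (Editor's illustration of the tree shape built from these links, for the
        // hypothetical IL layout `try { try {} catch {} } finally {}`: the outer
        // try node's ehnChild points at the inner try node, each try node reaches
        // its handler through the ehnHandlerNode union member, and non-nested
        // sibling regions are chained through ehnNext below.)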
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
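    // (Editor's note: this is the inverse of ehGetDsc above, so for any valid
    // region index `i`, ehGetIndex(ehGetDsc(i)) == i.)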
    unsigned ehGetIndex(EHblkDsc* ehDsc);

    // Return the EH descriptor index of the enclosing try, for the given region index.
    unsigned ehGetEnclosingTryIndex(unsigned regionIndex);

    // Return the EH descriptor index of the enclosing handler, for the given region index.
    unsigned ehGetEnclosingHndIndex(unsigned regionIndex);

    // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
    // block is not in a 'try' region).
    EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);

    // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
    // if this block is not in a filter or handler region).
    EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);

    // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
    // nullptr if this block's exceptions propagate to caller).
    EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);

    EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
    EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
    bool      ehIsBlockEHLast(BasicBlock* block);

    bool ehBlockHasExnFlowDsc(BasicBlock* block);

    // Return the region index of the most nested EH region this block is in.
    unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);

    // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
    unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);

    // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
    // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
    // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
    // (It can never be a filter.)
    unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);

    // A block has been deleted. Update the EH table appropriately.
    void ehUpdateForDeletedBlock(BasicBlock* block);

    // Determine whether a block can be deleted while preserving the EH normalization rules.
    bool ehCanDeleteEmptyBlock(BasicBlock* block);

    // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
    void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);

    // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
    // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
    // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
    // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
    // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
    // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if it lives in the handler region.
    // (It never lives in a filter.)
    unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);

    // Find the range of basic blocks containing all the BBJ_CALLFINALLY blocks that target the 'finallyIndex'
    // region's handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
    // (nullptr if the last block is the last block in the program).
    // Precondition: 'finallyIndex' is the EH region of a try/finally clause.
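    // Illustrative iteration over that range (editor's sketch):
    //
    //     BasicBlock* begBlk;
    //     BasicBlock* endBlk;
    //     ehGetCallFinallyBlockRange(finallyIndex, &begBlk, &endBlk);
    //     for (BasicBlock* blk = begBlk; blk != endBlk; blk = blk->bbNext)
    //     {
    //         // inspect only the BBJ_CALLFINALLY blocks that target this finally
    //     }
    //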
    void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);

#ifdef DEBUG
    // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
    // 'true' if the BBJ_CALLFINALLY is in the correct EH region.
    bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG

#if defined(FEATURE_EH_FUNCLETS)
    // Do we need a PSPSym in the main function? For codegen purposes, we only need one
    // if there is a filter that protects a region with a nested EH clause (such as a
    // try/catch nested in the 'try' body of a try/filter/filter-handler). See
    // genFuncletProlog() for more details. However, the VM seems to use it for more
    // purposes, maybe including debugging. Until we are sure otherwise, always create
    // a PSPSym for functions with any EH.
    bool ehNeedsPSPSym() const
    {
#ifdef TARGET_X86
        return false;
#else  // TARGET_X86
        return compHndBBtabCount > 0;
#endif // TARGET_X86
    }

    bool     ehAnyFunclets();  // Are there any funclets in this function?
    unsigned ehFuncletCount(); // Return the count of funclets in the function

    unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks

#else // !FEATURE_EH_FUNCLETS

    bool ehAnyFunclets()
    {
        return false;
    }
    unsigned ehFuncletCount()
    {
        return 0;
    }

    unsigned bbThrowIndex(BasicBlock* blk)
    {
        return blk->bbTryIndex;
    } // Get the index to use as the cache key for sharing throw blocks

#endif // !FEATURE_EH_FUNCLETS

    // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
    // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the
    // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
    // for example, we want to consider that the immediate dominator of the catch clause start block, so it's
    // convenient to also consider it a predecessor.)
    flowList* BlockPredsWithEH(BasicBlock* blk);

    // This table is useful for memoization of the method above.
    typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
    BlockToFlowListMap* m_blockToEHPreds;
    BlockToFlowListMap* GetBlockToEHPreds()
    {
        if (m_blockToEHPreds == nullptr)
        {
            m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
        }
        return m_blockToEHPreds;
    }

    void* ehEmitCookie(BasicBlock* block);
    UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);

    EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);

    EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);

    EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);

    EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);

    void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);

    void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);

    void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);

    void fgSkipRmvdBlocks(EHblkDsc* handlerTab);

    void fgAllocEHTable();

    void fgRemoveEHTableEntry(unsigned XTnum);

#if defined(FEATURE_EH_FUNCLETS)

    EHblkDsc* fgAddEHTableEntry(unsigned XTnum);

#endif // FEATURE_EH_FUNCLETS

#if !FEATURE_EH
    void fgRemoveEH();
#endif // !FEATURE_EH

    void fgSortEHTable();

    // Causes the EH table to obey some well-formedness conditions, by inserting
    // empty BB's when necessary:
    //   * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
                                GenTree* op1,
                                bool fromUnsigned,
                                var_types castType);

    GenTreeAllocObj* gtNewAllocObjNode(
        unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);

    GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);

    GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);

    GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);

    //------------------------------------------------------------------------
    // Other GenTree functions

    GenTree* gtClone(GenTree* tree, bool complexOK = false);

    // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
    // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
    // IntCnses with value `deepVarVal`.
    GenTree* gtCloneExpr(
        GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);

    // Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
    // `varNum` to int constants with value `varVal`.
    GenTree* gtCloneExpr(GenTree*     tree,
                         GenTreeFlags addFlags = GTF_EMPTY,
                         unsigned     varNum   = BAD_VAR_NUM,
                         int          varVal   = 0)
    {
        return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
    }

    Statement* gtCloneStmt(Statement* stmt)
    {
        GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
        return gtNewStmt(exprClone, stmt->GetDebugInfo());
    }

    // Internal helper for cloning a call
    GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
                                       GenTreeFlags addFlags   = GTF_EMPTY,
                                       unsigned     deepVarNum = BAD_VAR_NUM,
                                       int          deepVarVal = 0);

    // Create copy of an inline or guarded devirtualization candidate tree.
    GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);

    void gtUpdateSideEffects(Statement* stmt, GenTree* tree);

    void gtUpdateTreeAncestorsSideEffects(GenTree* tree);

    void gtUpdateStmtSideEffects(Statement* stmt);

    void gtUpdateNodeSideEffects(GenTree* tree);

    void gtUpdateNodeOperSideEffects(GenTree* tree);

    void gtUpdateNodeOperSideEffectsPost(GenTree* tree);

    // Returns "true" iff the complexity (not formally defined, but first interpretation
    // is #of nodes in subtree) of "tree" is greater than "limit".
    // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
    // before they have been set.)
    bool gtComplexityExceeds(GenTree** tree, unsigned limit);

    GenTree* gtReverseCond(GenTree* tree);

    static bool gtHasRef(GenTree* tree, ssize_t lclNum);

    bool gtHasLocalsWithAddrOp(GenTree* tree);

    unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
    unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);

    void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);

#ifdef DEBUG
    unsigned gtHashValue(GenTree* tree);

    GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif

    void gtPrepareCost(GenTree* tree);
    bool gtIsLikelyRegVar(GenTree* tree);

    // Returns true iff the secondNode can be swapped with firstNode.
    bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);

    // Given an address expression, compute its costs and addressing mode opportunities,
    // and mark addressing mode candidates as GTF_DONT_CSE.
    // TODO-Throughput - Consider actually instantiating these early, to avoid
    // having to re-run the algorithm that looks for them (might also improve CQ).
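    // (Editor's sketch: on xarch, an address tree such as
    //     GT_ADD(base, GT_ADD(GT_MUL(index, 4), 16))
    // can be folded into a single addressing mode [base + index*4 + 16], so the
    // participating nodes are flagged GTF_DONT_CSE to keep CSE from breaking the
    // pattern apart before codegen can use it.)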
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
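    // Illustrative usage (editor's sketch):
    //
    //     bool                 isExact   = false;
    //     bool                 isNonNull = false;
    //     CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);
    //     // clsHnd may be NO_CLASS_HANDLE when nothing is known about `tree`;
    //     // `isExact` reports whether the handle is the exact runtime type.
    //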
    CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);

    // Get the class handle for a helper call
    CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);

    // Get the element handle for an array of ref type.
    CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);

    // Get a class handle from a helper call argument
    CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);

    // Get the class handle for a field
    CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);

    // Check if this tree is a gc static base helper call
    bool gtIsStaticGCBaseHelperCall(GenTree* tree);

    //-------------------------------------------------------------------------
    // Functions to display the trees

#ifdef DEBUG
    void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);

    void gtDispConst(GenTree* tree);
    void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
    void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
    unsigned gtDispMultiRegCount(GenTree* tree);
#endif
    void gtDispRegVal(GenTree* tree);
    void gtDispZeroFieldSeq(GenTree* tree);
    void gtDispVN(GenTree* tree);
    void gtDispCommonEndLine(GenTree* tree);

    enum IndentInfo
    {
        IINone,
        IIArc,
        IIArcTop,
        IIArcBottom,
        IIEmbedded,
        IIError,
        IndentInfoCount
    };

    void gtDispChild(GenTree*             child,
                     IndentStack*         indentStack,
                     IndentInfo           arcType,
                     _In_opt_ const char* msg     = nullptr,
                     bool                 topOnly = false);
    void gtDispTree(GenTree*             tree,
                    IndentStack*         indentStack = nullptr,
                    _In_opt_ const char* msg         = nullptr,
                    bool                 topOnly     = false,
                    bool                 isLIR       = false);
    void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
    int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
    char* gtGetLclVarName(unsigned lclNum);
    void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
    void gtDispLclVarStructType(unsigned lclNum);
    void gtDispClassLayout(ClassLayout* layout, var_types type);
    void gtDispILLocation(const ILLocation& loc);
    void gtDispStmt(Statement* stmt, const char* msg = nullptr);
    void gtDispBlockStmts(BasicBlock* block);
    void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
    void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
    void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
    void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
    void gtDispFieldSeq(FieldSeqNode* pfsn);

    void gtDispRange(LIR::ReadOnlyRange const& range);

    void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);

    void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif

    // For tree walks

    enum fgWalkResult
    {
        WALK_CONTINUE,
        WALK_SKIP_SUBTREES,
        WALK_ABORT
    };
    struct fgWalkData;
    typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
    typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);

    static fgWalkPreFn gtMarkColonCond;
    static fgWalkPreFn gtClearColonCond;

    struct FindLinkData
    {
        GenTree*  nodeToFind;
        GenTree** result;
        GenTree*  parent;
    };

    FindLinkData gtFindLink(Statement* stmt, GenTree* node);
    bool gtHasCatchArg(GenTree* tree);

    typedef ArrayStack<GenTree*> GenTreeStack;

    static bool gtHasCallOnStack(GenTreeStack* parentStack);

//=========================================================================
// BasicBlock functions
#ifdef DEBUG
    // This is a debug flag we will use to assert when creating a block
during codegen // as this interferes with procedure splitting. If you know what you're doing, set // it to true before creating the block. (DEBUG only) bool fgSafeBasicBlockCreation; #endif BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind); void placeLoopAlignInstructions(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will // be placed in the stack frame and its fields must be laid out sequentially. // // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by // a local variable that can be enregistered or placed in the stack frame. // The fields do not need to be laid out sequentially // enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted, // and its field locals are independent of its parent struct local. PROMOTION_TYPE_DEPENDENT // The struct local is promoted, // but its field locals depend on its parent struct local. }; /*****************************************************************************/ enum FrameLayoutState { NO_FRAME_LAYOUT, INITIAL_FRAME_LAYOUT, PRE_REGALLOC_FRAME_LAYOUT, REGALLOC_FRAME_LAYOUT, TENTATIVE_FRAME_LAYOUT, FINAL_FRAME_LAYOUT }; public: RefCountState lvaRefCountState; // Current local ref count state bool lvaLocalVarRefCounted() const { return lvaRefCountState == RCS_NORMAL; } bool lvaTrackedFixed; // true: we cannot add new 'tracked' variables unsigned lvaCount; // total number of locals, which includes function arguments, // special arguments, IL local variables, and JIT temporary variables LclVarDsc* lvaTable; // variable descriptor table unsigned lvaTableCnt; // lvaTable size (>= lvaCount) unsigned lvaTrackedCount; // actual # of locals being tracked unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif #ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices. // If that changes, this changes. VarSets from different epochs // cannot be meaningfully combined. unsigned GetCurLVEpoch() { return lvaCurEpoch; } // reverse map of tracked number to var number unsigned lvaTrackedToVarNumSize; unsigned* lvaTrackedToVarNum; #if DOUBLE_ALIGN #ifdef DEBUG // # of procs compiled with a double-aligned stack static unsigned s_lvaDoubleAlignedProcsCount; #endif #endif // Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const; void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)); void lvaSetVarLiveInOutOfHandler(unsigned varNum); bool lvaVarDoNotEnregister(unsigned varNum); void lvSetMinOptsDoNotEnreg(); bool lvaEnregEHVars; bool lvaEnregMultiRegVars; void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)); unsigned lvaVarargsHandleArg; #ifdef TARGET_X86 unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack // arguments #endif // TARGET_X86 unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame #if FEATURE_FIXED_OUT_ARGS unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining. #endif unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods // that tracks whether the lock has been taken unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg. // However, if there is a "ldarga 0" or "starg 0" in the IL, // we will redirect all "ldarg(a) 0" and "starg 0" to this temp. unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression // in case there are multiple BBJ_RETURN blocks in the inlinee // or if the inlinee has GC ref locals. #if FEATURE_FIXED_OUT_ARGS unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space #endif // FEATURE_FIXED_OUT_ARGS static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding) { return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE); } // Variable representing the return address. The helper-based tailcall // mechanism passes the address of the return address to a runtime helper // where it is used to detect tail-call chains. unsigned lvaRetAddrVar; #if defined(DEBUG) && defined(TARGET_XARCH) unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call. #endif // defined(DEBUG) && defined(TARGET_X86) bool lvaGenericsContextInUse; bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or // CORINFO_GENERICS_CTXT_FROM_THIS? bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG? //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync #if !defined(FEATURE_EH_FUNCLETS) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots #endif // FEATURE_EH_FUNCLETS int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as // THIS pointer #ifdef JIT32_GCENCODER unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc #endif // JIT32_GCENCODER unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frame size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the given SP- or FP-relative offset, as indicated by "isFpBased". int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame.
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
bool lvaIsImplicitByRefLocal(unsigned varNum) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { assert(varDsc->lvIsParam); assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } // Returns true if this local var is a multireg struct bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg); // If the local is a TYP_STRUCT, get/set a class handle describing it CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum); void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true); void lvaSetStructUsedAsVarArg(unsigned varNum); // If the local is TYP_REF, set or update the associated class information. void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); #define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct // Info about struct type fields. struct lvaStructFieldInfo { CORINFO_FIELD_HANDLE fldHnd; unsigned char fldOffset; unsigned char fldOrdinal; var_types fldType; unsigned fldSize; CORINFO_CLASS_HANDLE fldTypeHnd; lvaStructFieldInfo() : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr) { } }; // Info about a struct type, instances of which may be candidates for promotion. struct lvaStructPromotionInfo { CORINFO_CLASS_HANDLE typeHnd; bool canPromote; bool containsHoles; bool customLayout; bool fieldsSorted; unsigned char fieldCnt; lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct]; lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr) : typeHnd(typeHnd) , canPromote(false) , containsHoles(false) , customLayout(false) , fieldsSorted(false) , fieldCnt(0) { } }; struct lvaFieldOffsetCmp { bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2); };
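// An illustrative example (a sketch, not actual JIT output) of what these
// descriptors hold for a simple promotable type:
//
//   struct Point { int x; int y; };
//
// lvaStructPromotionInfo would report canPromote == true, fieldCnt == 2, and
// containsHoles == false, with fields[0] describing "x" (fldOffset 0,
// fldType TYP_INT, fldSize 4) and fields[1] describing "y" at offset 4.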
// This class is responsible for checking validity and profitability of struct promotion. // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes // necessary information for fgMorphStructField to use. class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
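// A minimal sketch (illustrative only) of how the promotion queries above are
// typically combined to visit the field locals of an independently promoted
// struct; lvFieldLclStart and lvFieldCnt are the LclVarDsc members recording
// the first field local and the field count:
//
//   LclVarDsc* varDsc = lvaGetDesc(lclNum);
//   if (lvaGetPromotionType(varDsc) == PROMOTION_TYPE_INDEPENDENT)
//   {
//       for (unsigned i = 0; i < varDsc->lvFieldCnt; i++)
//       {
//           LclVarDsc* fieldDsc = lvaGetDesc(varDsc->lvFieldLclStart + i);
//           // each field local can be enregistered on its own
//       }
//   }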
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't have the "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents.
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSWAR(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSIMD(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic
intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types 
impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. 
DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows us to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "blk" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "blk" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack.
We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // their incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that their incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block);
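// An illustrative example of the type unification described above (a pseudo-IL
// sketch, not code from this compiler): two predecessors of a join block can
// leave different stack types in the same slot,
//
//   pred1:  ldc.i4.1    // pushes an 'int'
//   pred2:  ldloc.0     // pushes a 'native int' (local 0 is native int)
//
// so on 64-bit targets the shared spill temp is typed as native int, pred1 is
// re-imported to sign-extend its push, and 'float'/'double' pushes from
// different predecessors unify to 'double' in the same way.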
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing the impReimportSpillClique logic on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks. struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. 
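// A minimal sketch (illustrative only) of walking the block list through the
// members above; the Blocks() range helpers declared later in this section
// wrap the same traversal:
//
//   for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
//   {
//       // visit 'block' in program order
//   }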
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! 
if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? #endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. 
// - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are in the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering();

GenTree* fgInitThisClass();

GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);

GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);

bool backendRequiresLocalVarLifetimes()
{
    return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars();
}

void fgLocalVarLiveness();

void fgLocalVarLivenessInit();

void fgPerNodeLocalVarLiveness(GenTree* node);
void fgPerBlockLocalVarLiveness();

VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);

void fgLiveVarAnalysis(bool updateInternalOnly = false);

void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);

void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node);
bool fgComputeLifeTrackedLocalDef(VARSET_TP&           life,
                                  VARSET_VALARG_TP     keepAliveVars,
                                  LclVarDsc&           varDsc,
                                  GenTreeLclVarCommon* node);
bool fgComputeLifeUntrackedLocal(VARSET_TP&           life,
                                 VARSET_VALARG_TP     keepAliveVars,
                                 LclVarDsc&           varDsc,
                                 GenTreeLclVarCommon* lclVarNode);
bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode);

void fgComputeLife(VARSET_TP&       life,
                   GenTree*         startNode,
                   GenTree*         endNode,
                   VARSET_VALARG_TP volatileVars,
                   bool* pStmtInfoDirty DEBUGARG(bool* treeModf));

void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars);

bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange);

void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block);

bool fgRemoveDeadStore(GenTree**        pTree,
                       LclVarDsc*       varDsc,
                       VARSET_VALARG_TP life,
                       bool*            doAgain,
                       bool*            pStmtInfoDirty,
                       bool* pStoreRemoved DEBUGARG(bool* treeModf));

void fgInterBlockLocalVarLiveness();

// Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
//   1.   for (BasicBlock* const block : compiler->Blocks()) ...
//   2.   for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
//   3.   for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
// In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
// both `startBlock` and `endBlock` must be non-null.
//
BasicBlockSimpleList Blocks() const
{
    return BasicBlockSimpleList(fgFirstBB);
}

BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
{
    return BasicBlockSimpleList(startBlock);
}

BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
{
    return BasicBlockRangeList(startBlock, endBlock);
}

// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
    if (m_opAsgnVarDefSsaNums == nullptr)
    {
        m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
    }
    return m_opAsgnVarDefSsaNums;
}

// This map tracks nodes whose value numbers explicitly or implicitly depend on memory states.
// The map provides the entry block of the most closely enclosing loop that
// defines the memory region accessed when defining the node's VN.
//
// This information should be consulted when considering hoisting a node out of a loop, as the VN
// for the node will only be valid within the indicated loop.
//
// It is not fine-grained enough to track memory dependence within loops, so cannot be used
// for more general code motion.
//
// If a node does not have an entry in the map we currently assume the VN is not memory dependent
// and so memory does not constrain hoisting.
//
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap()
{
    if (m_nodeToLoopMemoryBlockMap == nullptr)
    {
        m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator());
    }
    return m_nodeToLoopMemoryBlockMap;
}

void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN);
void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree);

// Requires the value numbering phase to have completed. Returns the value number ("gtVN") of the
// "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the
// "use" VN. Performs a lookup into the map of (use asg tree -> def VN) to return the "def's"
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree);

// Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl);

inline bool PreciseRefCountsRequired();

// Performs SSA conversion.
void fgSsaBuild();

// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
void fgResetForSsa();

unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.

// Returns "true" if this is a special variable that is never zero initialized in the prolog.
inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum);

// Returns "true" if the variable needs explicit zero initialization.
inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn);

// The value numbers for this compilation.
ValueNumStore* vnStore;

public:
ValueNumStore* GetValueNumStore()
{
    return vnStore;
}

// Do value numbering (assign a value number to each
// tree node).
void fgValueNumber();

// Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
                                     ValueNum             arrVN,
                                     ValueNum             inxVN,
                                     FieldSeqNode*        fldSeq,
                                     ValueNum             rhsVN,
                                     var_types            indType);

// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
// can raise exceptions, those should be captured in the exception set "addrXvnp".
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.)
Also marks the tree's argument as the address of an array element.
// The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
ValueNum fgValueNumberArrIndexVal(GenTree*             tree,
                                  CORINFO_CLASS_HANDLE elemTypeEq,
                                  ValueNum             arrVN,
                                  ValueNum             inxVN,
                                  ValueNumPair         addrXvnp,
                                  FieldSeqNode*        fldSeq);

// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp);

// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);

unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.

// Utility functions for fgValueNumber.

// Perform value-numbering for the trees in "blk".
void fgValueNumberBlock(BasicBlock* blk);

// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
// assumed for the memoryKind at the start of "entryBlock".
ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);

// Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
// As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg));

// Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
// mutated.
void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg));

// For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
// As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));

// For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's
// MemorySsaMap.
void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg));

void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN);

// Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree);

// The input 'tree' is a leaf node that is a constant.
// Assign the proper value number to the tree.
void fgValueNumberTreeConst(GenTree* tree);

// If the VN store has been initialized, reassign the
// proper value number to the constant tree.
void fgUpdateConstTreeValueNumber(GenTree* tree);

// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
void fgValueNumberTree(GenTree* tree);

void fgValueNumberAssignment(GenTreeOp* tree);

// Does value-numbering for a block assignment.
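// (For illustration, a "block assignment" here is a GT_ASG whose destination is a struct-typed
// location -- e.g. a struct copy "dstLcl = srcLcl" or a struct zero-init "dstLcl = 0". This is a
// hypothetical sketch of the shape, not an exhaustive list of the cases the implementation handles.)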
void fgValueNumberBlockAssignment(GenTree* tree);
bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src);

// Does value-numbering for a cast tree.
void fgValueNumberCastTree(GenTree* tree);

// Does value-numbering for an intrinsic tree.
void fgValueNumberIntrinsic(GenTree* tree);

#ifdef FEATURE_SIMD
// Does value-numbering for a GT_SIMD tree
void fgValueNumberSimd(GenTreeSIMD* tree);
#endif // FEATURE_SIMD

#ifdef FEATURE_HW_INTRINSICS
// Does value-numbering for a GT_HWINTRINSIC tree
void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree);
#endif // FEATURE_HW_INTRINSICS

// Does value-numbering for a call. We interpret some helper calls.
void fgValueNumberCall(GenTreeCall* call);

// Does value-numbering for a helper representing a cast operation.
void fgValueNumberCastHelper(GenTreeCall* call);

// Does value-numbering for a helper "call" that has a VN function symbol "vnf".
void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);

// Requires "helpCall" to be a helper call. Assigns it a value number;
// we understand the semantics of some of the calls. Returns "true" if
// the call may modify the heap (we assume arbitrary memory side effects if so).
bool fgValueNumberHelperCall(GenTreeCall* helpCall);

// Requires that "helpFunc" is one of the pure Jit Helper methods.
// Returns the corresponding VNFunc to use for value numbering
VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc);

// Adds the exception set for the current tree node which has a memory indirection operation
void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr);

// Adds the exception sets for the current tree node which is performing a division or modulus operation
void fgValueNumberAddExceptionSetForDivision(GenTree* tree);

// Adds the exception set for the current tree node which is performing an overflow checking operation
void fgValueNumberAddExceptionSetForOverflow(GenTree* tree);

// Adds the exception set for the current tree node which is performing a bounds check operation
void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree);

// Adds the exception set for the current tree node which is performing a ckfinite operation
void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree);

// Adds the exception sets for the current tree node
void fgValueNumberAddExceptionSet(GenTree* tree);

#ifdef DEBUG
void fgDebugCheckExceptionSets();
void fgDebugCheckValueNumberedTree(GenTree* tree);
#endif

// These are the current value number for the memory implicit variables while
// doing value numbering. These are the value numbers under the "liberal" interpretation
// of memory values; the "conservative" interpretation needs no VN, since every access of
// memory yields an unknown value.
ValueNum fgCurMemoryVN[MemoryKindCount];

// Return a "pseudo"-class handle for an array element type. If "elemTyp" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a zero low-order bit). Otherwise,
// the low-order bit is 1, and the rest is an encoding of "elemTyp".
static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType)
{
    if (elemStructType != nullptr)
    {
        assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF ||
               varTypeIsIntegral(elemTyp));
        assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
        return elemStructType;
    }
    else
    {
        assert(elemTyp != TYP_STRUCT);
        elemTyp = varTypeToSigned(elemTyp);
        return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
    }
}

// If "clsHnd" is the result of encoding a primitive element type via EncodeElemType, decodes
// and returns the var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption
// that "clsHnd" is the struct type of the element).
static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
{
    size_t clsHndVal = size_t(clsHnd);
    if (clsHndVal & 0x1)
    {
        return var_types(clsHndVal >> 1);
    }
    else
    {
        return TYP_STRUCT;
    }
}

// Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
var_types getJitGCType(BYTE gcType);

// Returns true if the provided type should be treated as a primitive type
// for the unmanaged calling conventions.
bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);

enum structPassingKind
{
    SPK_Unknown,       // Invalid value, never returned
    SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
    SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
                       // require a primitive type temp that is larger than the struct size.
                       // Currently used for structs of size 3, 5, 6, or 7 bytes.
    SPK_ByValue,       // The struct is passed/returned by value (using the ABI rules)
                       // for ARM64 and UNIX_X64 in multiple registers. (when all of the
                       // parameter registers are used, then the stack will be used)
                       // for X86 passed on the stack, for ARM32 passed in registers
                       // or the stack or split between registers and the stack.
    SPK_ByValueAsHfa,  // The struct is passed/returned as an HFA in multiple registers.
    SPK_ByReference
}; // The struct is passed/returned by reference to a copy/buffer.

// Get the "primitive" type that is used when we are given a struct of size 'structSize'.
// For pointer-sized structs the 'clsHnd' is used to determine if the struct contains a GC ref.
// A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
// If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
//
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);

// Get the type that is used to pass values of the given struct type.
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
                              structPassingKind*   wbPassStruct,
                              bool                 isVarArg,
                              unsigned             structSize);

// Get the type that is used to return values of the given struct type.
// If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE     clsHnd,
                                 CorInfoCallConvExtension callConv,
                                 structPassingKind*       wbPassStruct = nullptr,
                                 unsigned                 structSize   = 0);

#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
// If "level" is non-zero, we also print out a partial expansion of the value.
void vnpPrint(ValueNumPair vnp, unsigned level);
void vnPrint(ValueNum vn, unsigned level);
#endif

bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2

// Dominator computation member functions
// Not exposed outside Compiler
protected:
bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2

// Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
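// An outline of the approach, assuming the classic iterative "intersect" formulation that
// fgIntersectDom (below) supports -- a sketch, not the literal implementation:
//
//     // process blocks in reverse post-order until no immediate dominator changes
//     bool changed = true;
//     while (changed)
//     {
//         changed = false;
//         for each non-entry block "b" in reverse post-order:
//             BasicBlock* newIdom = <fgIntersectDom over b's already-processed predecessors>;
//             if (newIdom != b->bbIDom) { b->bbIDom = newIdom; changed = true; }
//     }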
void fgComputeDoms();

void fgCompDominatedByExceptionalEntryBlocks();

BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
// Note: this is relatively slow compared to calling fgDominate(),
// especially if dealing with a single block versus block check.

void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)

void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.

void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.

bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.

void fgComputeReachability(); // Perform flow graph node reachability analysis.

BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.

void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
                          // processed in topological order; this function takes care of that.

void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);

BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
                                         // Returns this as a set.

INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.

DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
                               // (performed by fgComputeDoms), this procedure builds the dominance tree, represented
                               // as adjacency lists.

// In order to speed up queries of the form 'Does A dominate B?', we can perform a DFS preorder and postorder
// traversal of the dominance tree; the dominance query then becomes: A dominates B iff preOrder(A) <= preOrder(B)
// && postOrder(A) >= postOrder(B), making the computation O(1).
void fgNumberDomTree(DomTreeNode* domTree);

// When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
// dominators, and possibly loops.
void fgUpdateChangedFlowGraph(const bool computePreds        = true,
                              const bool computeDoms         = true,
                              const bool computeReturnBlocks = false,
                              const bool computeLoops        = false);

public:
// Compute the predecessors of the blocks in the control flow graph.
void fgComputePreds();

// Remove all predecessor information.
void fgRemovePreds();

// Compute the cheap flow graph predecessor lists. This is used in some early phases
// before the full predecessor lists are computed.
void fgComputeCheapPreds();

private:
void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);

void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);

public:
enum GCPollType
{
    GCPOLL_NONE,
    GCPOLL_CALL,
    GCPOLL_INLINE
};

// Initialize the per-block variable sets (used for liveness analysis).
void fgInitBlockVarSets();

PhaseStatus fgInsertGCPolls();
BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);

// Requires that "block" is a block that returns from
// a finally. Returns the number of successors (jump targets of
// blocks in the covered "try" that did a "LEAVE").
unsigned fgNSuccsOfFinallyRet(BasicBlock* block);

// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
// a finally. Returns its "i"th successor (jump targets of
// blocks in the covered "try" that did a "LEAVE").
// Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
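// An illustrative use of this machinery (a sketch, not code from this file): visiting each
// distinct successor of a switch block exactly once, given a Compiler* "comp":
//
//     SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(switchBlk);
//     for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
//     {
//         BasicBlock* const uniqueSucc = sd.nonDuplicates[i];
//         // ... process uniqueSucc ...
//     }
//
// (fgInvalidateSwitchDescMapEntry, below, performs that removal for a single block.)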
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data
    GenTree*      parent;       // parent of current node, provided to callback
    GenTreeStack* parentStack;  // stack of parent nodes, if asked for
    bool          wtprLclsOnly; // whether to only visit lclvar nodes
#ifdef DEBUG
    bool printModified; // callback can use this
#endif
};

fgWalkResult fgWalkTreePre(GenTree**    pTree,
                           fgWalkPreFn* visitor,
                           void*        pCallBackData = nullptr,
                           bool         lclVarsOnly   = false,
                           bool         computeStack  = false);

fgWalkResult fgWalkTree(GenTree**     pTree,
                        fgWalkPreFn*  preVisitor,
                        fgWalkPostFn* postVisitor,
                        void*         pCallBackData = nullptr);

void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData);

//----- Postorder

fgWalkResult fgWalkTreePost(GenTree**     pTree,
                            fgWalkPostFn* visitor,
                            void*         pCallBackData = nullptr,
                            bool          computeStack  = false);

// An fgWalkPreFn that looks for expressions that have inline throws in
// minopts mode. Basically it looks for trees with gtOverflowEx() or
// GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It
// returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags
// properly propagated to parent trees). It returns WALK_CONTINUE
// otherwise.
static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data);

/**************************************************************************
 *                          PROTECTED
 *************************************************************************/

protected:
friend class SsaBuilder;
friend struct ValueNumberState;

//--------------------- Detect the basic blocks ---------------------------

BasicBlock** fgBBs; // Table of pointers to the BBs

void        fgInitBBLookup();
BasicBlock* fgLookupBB(unsigned addr);

bool fgCanSwitchToOptimized();
void fgSwitchToOptimized(const char* reason);

bool fgMayExplicitTailCall();

void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);

void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);

void fgLinkBasicBlocks();

unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);

void fgCheckBasicBlockControlFlow();

void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */);

bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling);

void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining);

void fgAdjustForAddressExposedOrWrittenThis();

unsigned fgStressBBProf()
{
#ifdef DEBUG
    unsigned result = JitConfig.JitStressBBProf();
    if (result == 0)
    {
        if (compStressCompile(STRESS_BB_PROFILE, 15))
        {
            result = 1;
        }
    }
    return result;
#else
    return 0;
#endif
}

bool fgHaveProfileData();
bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight);

Instrumentor* fgCountInstrumentor;
Instrumentor* fgClassInstrumentor;

PhaseStatus fgPrepareToInstrumentMethod();
PhaseStatus fgInstrumentMethod();
PhaseStatus fgIncorporateProfileData();
void        fgIncorporateBlockCounts();
void        fgIncorporateEdgeCounts();

CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema,
                                    UINT32                                 countSchemaItems,
                                    BYTE*                                  pInstrumentationData,
                                    int32_t                                ilOffset,
                                    CLRRandom*                             random);

public:
const char*                            fgPgoFailReason;
bool                                   fgPgoDisabled;
ICorJitInfo::PgoSource                 fgPgoSource;
ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema;
BYTE*                                  fgPgoData;
UINT32                                 fgPgoSchemaCount;
HRESULT                                fgPgoQueryResult;
UINT32                                 fgNumProfileRuns;
UINT32                                 fgPgoBlockCounts;
UINT32                                 fgPgoEdgeCounts;
UINT32                                 fgPgoClassProfiles;
unsigned                               fgPgoInlineePgo;
unsigned                               fgPgoInlineeNoPgo;
unsigned                               fgPgoInlineeNoPgoSingleBlock;

void WalkSpanningTree(SpanningTreeVisitor* visitor);
void fgSetProfileWeight(BasicBlock* block, weight_t weight);
void fgApplyProfileScale();
bool fgHaveSufficientProfileData();
bool fgHaveTrustedProfileData();

// fgIsUsingProfileWeights - returns true if we have real profile data for this method
//                           or if we have some fake profile data for the stress mode
bool fgIsUsingProfileWeights()
{
    return (fgHaveProfileData() || fgStressBBProf());
}

// fgProfileRunsCount - returns total number of scenario runs for the profile data
//                      or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data.
unsigned fgProfileRunsCount()
{
    return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED;
}

//-------- Insert a statement at the start or end of a basic block --------

#ifdef DEBUG
public:
static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true);
#endif

public:
Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());

private:
void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt);
void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt);
void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt);

public:
void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt);

private:
Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList);

// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
GenTree* fgMakeMultiUse(GenTree** ppTree);

private:
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
bool fgOperIsBitwiseRotationRoot(genTreeOps oper);

#if !defined(TARGET_64BIT)
// Recognize and morph a long multiplication with 32 bit operands.
GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul);
GenTreeOp* fgMorphLongMul(GenTreeOp* mul);
#endif

//-------- Determine the order in which the trees will be evaluated -------

unsigned fgTreeSeqNum;
GenTree* fgTreeSeqLst;
GenTree* fgTreeSeqBeg;

GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false);
void fgSetTreeSeqHelper(GenTree* tree, bool isLIR);
void fgSetTreeSeqFinish(GenTree* tree, bool isLIR);
void fgSetStmtSeq(Statement* stmt);
void fgSetBlockOrder(BasicBlock* block);

//------------------------- Morphing --------------------------------------

unsigned fgPtrArgCntMax;

public:
//------------------------------------------------------------------------
// fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This value is calculated during morph.
//
// Return Value:
//    Returns fgPtrArgCntMax, which is a private field.
//
unsigned fgGetPtrArgCntMax() const
{
    return fgPtrArgCntMax;
}

//------------------------------------------------------------------------
// fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
//
void fgSetPtrArgCntMax(unsigned argCntMax)
{
    fgPtrArgCntMax = argCntMax;
}

bool compCanEncodePtrArgCntMax();

private:
hashBv* fgOutgoingArgTemps;
hashBv* fgCurrentlyInUseArgTemps;

void fgSetRngChkTarget(GenTree* tree, bool delay = true);

BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);

#if REARRANGE_ADDS
void fgMoveOpsLeft(GenTree* tree);
#endif

bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);

bool fgIsThrow(GenTree* tree);

bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
bool fgIsBlockCold(BasicBlock* block);

GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);

GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);

GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);

// A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
// it is useful to know whether the address will be immediately dereferenced, or whether the address value will
// be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
// for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
// know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
// all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
// small; hence the other fields of MorphAddrContext.
enum MorphAddrContextKind
{
    MACK_Ind,
    MACK_Addr,
};
struct MorphAddrContext
{
    MorphAddrContextKind m_kind;
    bool                 m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
                                               // top-level indirection and here have been constants.
    size_t m_totalOffset;                      // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets"
                                               // is true. In that case, is the sum of those constant offsets.

    MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
    {
    }
};

// A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
static MorphAddrContext s_CopyBlockMAC;

#ifdef FEATURE_SIMD
GenTree* getSIMDStructFromField(GenTree*     tree,
                                CorInfoType* simdBaseJitTypeOut,
                                unsigned*    indexOut,
                                unsigned*    simdSizeOut,
                                bool         ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
void impMarkContiguousSIMDFieldAssignments(Statement* stmt);

// fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking the previous SIMD field assignment
// in function: Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
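// For example (a hypothetical illustration): a small struct local "V03" stored to via
// "LCL_FLD int V03" so it can be passed or returned in a scalar register must stay on the
// stack, since an enregistered or independently promoted V03 would have no contiguous
// addressable bytes to reinterpret.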
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
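// A sketch of how the context threads through a loop nest (an outline consistent with the
// comments above, not the literal implementation):
//
//     optHoistLoopNest(lnum):
//         optHoistThisLoop(lnum)                        // hoist out of this loop first
//         move ExtractHoistedInCurLoop() VNs into m_hoistedInParentLoops
//         for each child loop "c": optHoistLoopNest(c)  // children skip those VNs
//         remove those VNs from m_hoistedInParentLoops again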
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);

// Return true if the tree looks profitable to hoist out of loop 'lnum'.
bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);

// Performs the hoisting of 'tree' into the pre-header of loop 'lnum'.
void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);

// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
//   Constants and init values are always loop invariant.
//   VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);

// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);

// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
void optComputeLoopSideEffects();

#ifdef DEBUG
bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG

// Mark a loop as removed.
void optMarkLoopRemoved(unsigned loopNum);

private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
void optComputeLoopNestSideEffects(unsigned lnum);

// Given a loop number 'lnum', mark it and any nested loops as having 'memoryHavoc'
void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);

// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
// Returns false if we encounter a block that is not marked as being inside a loop.
//
bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);

// Hoist the expression "expr" out of loop "lnum".
void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);

public:
void optOptimizeBools();

public:
PhaseStatus optInvertLoops();    // Invert loops so they're entered at top and tested at bottom.
PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
PhaseStatus optSetBlockWeights();
PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table

void optFindLoops();

PhaseStatus optCloneLoops();
void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
void        optRemoveRedundantZeroInits();

protected:
// This enumeration describes what is killed by a call.

enum callInterf
{
    CALLINT_NONE,       // no interference                               (most helpers)
    CALLINT_REF_INDIRS, // kills GC ref indirections                     (SETFIELD OBJ)
    CALLINT_SCL_INDIRS, // kills non GC ref indirections                 (SETFIELD non-OBJ)
    CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
    CALLINT_ALL,        // kills everything                              (normal method call)
};

enum class FieldKindForVN
{
    SimpleStatic,
    WithBaseAddr
};

public:
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
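    // A typical traversal of the loop table and of a loop's lexical blocks, using the declarations
    // above (an illustrative sketch; real traversals usually also skip loops that have been marked
    // as removed):
    //
    //     for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
    //     {
    //         LoopDsc& loop = optLoopTable[lnum];
    //         for (BasicBlock* const block : loop.LoopBlocks())
    //         {
    //             // ... process "block" ...
    //         }
    //     }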
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
    //
    static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
    {
        return getCSEAvailBit(CSEnum) + 1;
    }
    // For example, the CSE with index 3 ("CSE #03") maps to bit 2 via genCSEnum2bit, so its
    // availability bit is 4 and its call-kill-aware availability bit is 5.

    void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);

    EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites

    /* Generic list of nodes - used by the CSE logic */

    struct treeLst
    {
        treeLst* tlNext;
        GenTree* tlTree;
    };

    struct treeStmtLst
    {
        treeStmtLst* tslNext;
        GenTree*     tslTree;  // tree node
        Statement*   tslStmt;  // statement containing the tree
        BasicBlock*  tslBlock; // block containing the statement
    };

    // The following logic keeps track of expressions via a simple hash table.

    struct CSEdsc
    {
        CSEdsc*  csdNextInBucket;  // used by the hash table
        size_t   csdHashKey;       // the original hash key
        ssize_t  csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
        ValueNum csdConstDefVN;    // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
                                   // assignment
        unsigned csdIndex;         // 1..optCSECandidateCount
        bool     csdIsSharedConst; // true if this CSE is a shared const
        bool     csdLiveAcrossCall;

        unsigned short csdDefCount; // definition count
        unsigned short csdUseCount; // use count (excluding the implicit uses at defs)

        weight_t csdDefWtCnt; // weighted def count
        weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)

        GenTree*    csdTree;  // treenode containing the 1st occurrence
        Statement*  csdStmt;  // stmt containing the 1st occurrence
        BasicBlock* csdBlock; // block containing the 1st occurrence

        treeStmtLst* csdTreeList; // list of matching tree nodes: head
        treeStmtLst* csdTreeLast; // list of matching tree nodes: tail

        // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have a valid struct handle.
        //
        CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
        bool                 csdStructHndMismatch;

        ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
                                   // This will be set to NoVN if we decide to abandon this CSE

        ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.

        ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
                                   // number, this will reflect it; otherwise, NoVN.
                                   // not used for shared const CSE's
    };

    static const size_t s_optCSEhashSizeInitial;
    static const size_t s_optCSEhashGrowthFactor;
    static const size_t s_optCSEhashBucketSize;
    size_t              optCSEhashSize;                 // The current size of the hash table
    size_t              optCSEhashCount;                // Number of entries in the hash table
    size_t              optCSEhashMaxCountBeforeResize; // Number of entries before resize
    CSEdsc**            optCSEhash;
    CSEdsc**            optCSEtab;

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;

    NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
                                          // re-numbered with the bound to improve range check elimination

    // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
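    // For example (an illustrative sketch): for a compare such as "i < ARR_LENGTH(a)" where the
    // array length node is a CSE candidate, an entry mapping the bound to the compare is added, so
    // that when the bound is replaced by a CSE temp the compare can be re-numbered against the temp
    // and range check elimination still recognizes the relationship.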
    void optCseUpdateCheckedBoundMap(GenTree* compare);

    void optCSEstop();

    CSEdsc* optCSEfindDsc(unsigned index);
    bool    optUnmarkCSE(GenTree* tree);

    // user-defined callback data for the tree walk function optCSE_MaskHelper()
    struct optCSE_MaskData
    {
        EXPSET_TP CSE_defMask;
        EXPSET_TP CSE_useMask;
    };

    // Treewalk helper for optCSE_DefMask and optCSE_UseMask
    static fgWalkPreFn optCSE_MaskHelper;

    // This function walks all the nodes of a given tree
    // and returns the mask of CSE definitions and uses for the tree
    //
    void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);

    // Given a binary tree node, return true if it is safe to swap the order of evaluation for op1 and op2.
    bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);

    struct optCSEcostCmpEx
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };
    struct optCSEcostCmpSz
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };

    void optCleanupCSEs();

#ifdef DEBUG
    void optEnsureClearCSEInfo();
#endif // DEBUG

    static bool Is_Shared_Const_CSE(size_t key)
    {
        return ((key & TARGET_SIGN_BIT) != 0);
    }

    // returns the encoded key
    static size_t Encode_Shared_Const_CSE_Value(size_t key)
    {
        return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
    }

    // returns the original key
    static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
    {
        assert(Is_Shared_Const_CSE(enckey));
        return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
    }

    /**************************************************************************
     *                   Value Number based CSEs
     *************************************************************************/

    // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"

public:
    void optOptimizeValnumCSEs();

protected:
    void     optValnumCSE_Init();
    unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
    bool     optValnumCSE_Locate();
    void     optValnumCSE_InitDataFlow();
    void     optValnumCSE_DataFlow();
    void     optValnumCSE_Availablity();
    void     optValnumCSE_Heuristic();

    bool     optDoCSE;             // True when we have found a duplicate CSE tree
    bool     optValnumCSE_phase;   // True when we are executing the optOptimizeValnumCSEs() phase
    unsigned optCSECandidateCount; // Count of CSE candidates
    unsigned optCSEstart;          // The first local variable number that is a CSE
    unsigned optCSEcount;          // The total count of CSEs introduced.
    weight_t optCSEweight;         // The weight of the current block when we are doing PerformCSE

    bool optIsCSEcandidate(GenTree* tree);

    // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
    //
    bool lclNumIsTrueCSE(unsigned lclNum) const
    {
        return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
    }

    // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
    //
    bool lclNumIsCSE(unsigned lclNum) const
    {
        return lvaGetDesc(lclNum)->lvIsCSE;
    }

#ifdef DEBUG
    bool optConfigDisableCSE();
    bool optConfigDisableCSE2();
#endif

    void optOptimizeCSEs();

    struct isVarAssgDsc
    {
        GenTree*     ivaSkip;
        ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
        void* ivaSelf;
#endif
        unsigned    ivaVar;            // Variable we are interested in, or -1
        varRefKinds ivaMaskInd;        // What kind of indirect assignments are there?
        callInterf  ivaMaskCall;       // What kind of calls are there?
        bool        ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTreeOp* asg, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
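    // Example of the AssertionDsc encoding above (an illustrative sketch; the local number and
    // constant are hypothetical): a local assertion "V05 == 42" would be represented with
    // assertionKind == OAK_EQUAL, op1.kind == O1K_LCLVAR and op1.lcl.lclNum == 5, and
    // op2.kind == O2K_CONST_INT with op2.u1.iconVal == 42.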
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
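    // For example (illustrative): given an assertion "V03 == 7", optConstantAssertionProp can
    // replace a use of V03 with the constant 7; given a copy assertion "V03 == V04",
    // optCopyAssertionProp can replace a use of V03 with V04.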
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
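    // For example (illustrative): a copy assertion "V01 == V02" together with a constant assertion
    // "V02 == 3" implies "V01 == 3"; optImpliedByCopyAssertion derives such implied assertions.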
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM

// If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR
// node, else NULL.
static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);

// This map is indexed by GT_OBJ nodes that are addresses of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
// table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
// the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
// vars of the promoted struct local that go dead at the given node (the set bits are the bits
// for the tracked var indices of the field vars, as in a live var set).
//
// The map is allocated on demand, so all map operations should use one of the following three
// wrapper methods.

NodeToVarsetPtrMap* m_promotedStructDeathVars;

NodeToVarsetPtrMap* GetPromotedStructDeathVars()
{
    if (m_promotedStructDeathVars == nullptr)
    {
        m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
    }
    return m_promotedStructDeathVars;
}

void ClearPromotedStructDeathVars()
{
    if (m_promotedStructDeathVars != nullptr)
    {
        m_promotedStructDeathVars->RemoveAll();
    }
}

bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
{
    *bits       = nullptr;
    bool result = false;

    if (m_promotedStructDeathVars != nullptr)
    {
        result = m_promotedStructDeathVars->Lookup(tree, bits);
    }

    return result;
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           UnwindInfo                                      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#if !defined(__GNUC__)
#pragma region Unwind information
#endif

public:
//
// Infrastructure functions: start/stop/reserve/emit.
//

void unwindBegProlog();
void unwindEndProlog();
void unwindBegEpilog();
void unwindEndEpilog();
void unwindReserve();
void unwindEmit(void* pHotCode, void* pColdCode);

//
// Specific unwind information functions: called by code generation to indicate a particular
// prolog or epilog unwindable instruction has been generated.
//

void unwindPush(regNumber reg);
void unwindAllocStack(unsigned size);
void unwindSetFrameReg(regNumber reg, unsigned offset);
void unwindSaveReg(regNumber reg, unsigned offset);

#if defined(TARGET_ARM)
void unwindPushMaskInt(regMaskTP mask);
void unwindPushMaskFloat(regMaskTP mask);
void unwindPopMaskInt(regMaskTP mask);
void unwindPopMaskFloat(regMaskTP mask);
void unwindBranch16();                    // The epilog terminates with a 16-bit branch (e.g., "bx lr")
void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
                                          // called via unwindPadding().
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                      // instruction and the current location.
#endif // TARGET_ARM

#if defined(TARGET_ARM64)
void unwindNop();
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                      // instruction and the current location.
void unwindSaveReg(regNumber reg, int offset);           // str reg, [sp, #offset]
void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Should we support SIMD intrinsics? bool featureSIMD; // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
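// (A hedged example of such a situation: fetching element 'i' of a vector when
// 'i' is not a constant may be lowered to a store of the whole vector to this
// temp followed by an indexed load from the stack, roughly:
//
//   store   [simdInitTemp], vector
//   load    result, [simdInitTemp + i * elemSize]
//
// This sketches the intent only; the exact instructions produced depend on the
// target and the intrinsic.)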
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
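// (Hypothetical example: a TYP_SIMD16 zero node synthesized by the JIT itself,
// in a method that never imported any Vector<T> code, would take this path;
// callers therefore have to tolerate a NO_CLASS_HANDLE result.)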
return NO_CLASS_HANDLE;
}

if (simdBaseJitType == CORINFO_TYPE_FLOAT)
{
    switch (simdType)
    {
        case TYP_SIMD8:
            return m_simdHandleCache->SIMDVector2Handle;
        case TYP_SIMD12:
            return m_simdHandleCache->SIMDVector3Handle;
        case TYP_SIMD16:
            if ((getSIMDVectorType() == TYP_SIMD32) || (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
            {
                return m_simdHandleCache->SIMDVector4Handle;
            }
            break;
        case TYP_SIMD32:
            break;
        default:
            unreached();
    }
}

assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
switch (simdBaseJitType)
{
    case CORINFO_TYPE_FLOAT:
        return m_simdHandleCache->SIMDFloatHandle;
    case CORINFO_TYPE_DOUBLE:
        return m_simdHandleCache->SIMDDoubleHandle;
    case CORINFO_TYPE_INT:
        return m_simdHandleCache->SIMDIntHandle;
    case CORINFO_TYPE_USHORT:
        return m_simdHandleCache->SIMDUShortHandle;
    case CORINFO_TYPE_UBYTE:
        return m_simdHandleCache->SIMDUByteHandle;
    case CORINFO_TYPE_SHORT:
        return m_simdHandleCache->SIMDShortHandle;
    case CORINFO_TYPE_BYTE:
        return m_simdHandleCache->SIMDByteHandle;
    case CORINFO_TYPE_LONG:
        return m_simdHandleCache->SIMDLongHandle;
    case CORINFO_TYPE_UINT:
        return m_simdHandleCache->SIMDUIntHandle;
    case CORINFO_TYPE_ULONG:
        return m_simdHandleCache->SIMDULongHandle;
    case CORINFO_TYPE_NATIVEINT:
        return m_simdHandleCache->SIMDNIntHandle;
    case CORINFO_TYPE_NATIVEUINT:
        return m_simdHandleCache->SIMDNUIntHandle;
    default:
        assert(!"Didn't find a class handle for simdType");
}

return NO_CLASS_HANDLE;
}

// Returns true if this is a SIMD type that should be considered an opaque
// vector type (i.e. do not analyze or promote its fields).
// Note that all but the fixed vector types are opaque, even though they may
// actually be declared as having fields.
bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
{
    return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
            (structHandle != m_simdHandleCache->SIMDVector3Handle) &&
            (structHandle != m_simdHandleCache->SIMDVector4Handle));
}

// Returns true if the tree corresponds to a TYP_SIMD lcl var.
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
// the type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
bool isSIMDTypeLocal(GenTree* tree)
{
    return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
}

// Returns true if the lclVar is an opaque SIMD type.
bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
{
    if (!varDsc->lvSIMDType)
    {
        return false;
    }

    return isOpaqueSIMDType(varDsc->GetStructHnd());
}

static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
{
    return (intrinsicId == SIMDIntrinsicEqual);
}

// Returns base JIT type of a TYP_SIMD local.
// Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
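// (Illustrative: for a local imported as System.Numerics.Vector<float> this
// returns CORINFO_TYPE_FLOAT, while for an ordinary TYP_INT local it returns
// CORINFO_TYPE_UNDEF.)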
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
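// (Illustrative: a call such as Vector4.op_Addition would be recognized here
// and expanded into a SIMD IR node instead of remaining an ordinary call,
// assuming the SIMD intrinsic tables recognize the method; this is a sketch of
// the intent, not an exhaustive description of the supported surface.)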
GenTree* impSIMDIntrinsic(OPCODE                opcode,
                          GenTree*              newobjThis,
                          CORINFO_CLASS_HANDLE  clsHnd,
                          CORINFO_METHOD_HANDLE method,
                          CORINFO_SIG_INFO*     sig,
                          unsigned              methodFlags,
                          int                   memberRef);

GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);

// Whether a SIMD vector occupies part of a SIMD register.
// SSE2: vector2f/3f are considered sub-register SIMD types.
// AVX: vector2f, 3f and 4f are all considered sub-register SIMD types.
bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
{
    unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
    // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
    // with the AOT compiler, so that it cannot change from AOT compilation time to runtime.
    // This api does not require such fixing, as it merely pertains to the size of the simd type
    // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
    // does not preclude the code from being used on a machine with a larger vector length.)
    if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
    {
        vectorRegisterByteLength = 16;
    }
    else
    {
        vectorRegisterByteLength = 32;
    }
#else
    vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
    return (simdNode->GetSimdSize() < vectorRegisterByteLength);
}

// Get the type for the hardware SIMD vector.
// This is the maximum SIMD type supported for this target.
var_types getSIMDVectorType()
{
#if defined(TARGET_XARCH)
    if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
    {
        return TYP_SIMD32;
    }
    else
    {
        // Verify and record that AVX2 isn't supported
        compVerifyInstructionSetUnusable(InstructionSet_AVX2);
        assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
        return TYP_SIMD16;
    }
#elif defined(TARGET_ARM64)
    return TYP_SIMD16;
#else
    assert(!"getSIMDVectorType() unimplemented on target arch");
    unreached();
#endif
}

// Get the size of the SIMD type in bytes
int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
    unsigned sizeBytes = 0;
    (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
    return sizeBytes;
}

// Get the number of elements of baseType of SIMD vector given by its size and baseType
static int getSIMDVectorLength(unsigned simdSize, var_types baseType);

// Get the number of elements of baseType of SIMD vector given by its type handle
int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);

// Get preferred alignment of SIMD type.
int getSIMDTypeAlignment(var_types simdType);

// Get the number of bytes in a System.Numerics.Vector<T> for the current compilation.
// Note - cannot be used for System.Runtime.Intrinsic
unsigned getSIMDVectorRegisterByteLength()
{
#if defined(TARGET_XARCH)
    if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
    {
        return YMM_REGSIZE_BYTES;
    }
    else
    {
        // Verify and record that AVX2 isn't supported
        compVerifyInstructionSetUnusable(InstructionSet_AVX2);
        assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
        return XMM_REGSIZE_BYTES;
    }
#elif defined(TARGET_ARM64)
    return FP_REGSIZE_BYTES;
#else
    assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
    unreached();
#endif
}

// The minimum and maximum possible number of bytes in a SIMD vector.
// maxSIMDStructBytes
// The maximum SIMD size supported by System.Numerics.Vectors or System.Runtime.Intrinsics:
// SSE:  16-byte Vector<T> and Vector128<T>
// AVX:  32-byte Vector256<T> (Vector<T> is 16-byte)
// AVX2: 32-byte Vector<T> and Vector256<T>
unsigned int maxSIMDStructBytes()
{
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
    if (compOpportunisticallyDependsOn(InstructionSet_AVX))
    {
        return YMM_REGSIZE_BYTES;
    }
    else
    {
        // Verify and record that AVX2 isn't supported
        compVerifyInstructionSetUnusable(InstructionSet_AVX2);
        assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
        return XMM_REGSIZE_BYTES;
    }
#else
    return getSIMDVectorRegisterByteLength();
#endif
}

unsigned int minSIMDStructBytes()
{
    return emitTypeSize(TYP_SIMD8);
}

public:
// Returns the codegen type for a given SIMD size.
static var_types getSIMDTypeForSize(unsigned size)
{
    var_types simdType = TYP_UNDEF;
    if (size == 8)
    {
        simdType = TYP_SIMD8;
    }
    else if (size == 12)
    {
        simdType = TYP_SIMD12;
    }
    else if (size == 16)
    {
        simdType = TYP_SIMD16;
    }
    else if (size == 32)
    {
        simdType = TYP_SIMD32;
    }
    else
    {
        noway_assert(!"Unexpected size for SIMD type");
    }
    return simdType;
}

private:
unsigned getSIMDInitTempVarNum(var_types simdType);

#else // !FEATURE_SIMD
bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
{
    return false;
}
#endif // FEATURE_SIMD

public:
//------------------------------------------------------------------------
// largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered.
//
// Notes: It is not guaranteed that a struct of this size or smaller WILL be a
//        candidate for enregistration.

unsigned largestEnregisterableStructSize()
{
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
    if (opts.IsReadyToRun())
    {
        // Return a constant instead of maxSIMDStructBytes: maxSIMDStructBytes performs
        // checks that are affected by the current level of instruction set support, which
        // would otherwise cause the highest level of instruction set support to be reported
        // to crossgen2. This api is only ever used as an optimization or in an assert, so
        // no reporting should ever happen.
        return YMM_REGSIZE_BYTES;
    }
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
    unsigned vectorRegSize = maxSIMDStructBytes();
    assert(vectorRegSize >= TARGET_POINTER_SIZE);
    return vectorRegSize;
#else  // !FEATURE_SIMD
    return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
}

// Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
// structs will fit the criteria.
bool structSizeMightRepresentSIMDType(size_t structSize)
{
#ifdef FEATURE_SIMD
    // Do not use maxSIMDStructBytes, as that api in R2R on X86 and X64 may notify the JIT
    // about the size of a struct under the assumption that the struct size needs to be recorded.
    // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is
    // enregistered will not be messaged to the R2R compiler.
    return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
    return false;
#endif // FEATURE_SIMD
}

#ifdef FEATURE_SIMD
static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS

private:
// These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
// is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately. The use
// of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.
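// (Illustrative: lclVarIsSIMDType below returns true for a local whose type was
// imported as, e.g., System.Numerics.Vector2 or Vector<T>; under !FEATURE_SIMD
// lvIsSIMDType() simply returns false, so no #ifdef is needed at the call sites.)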
// Is this var of type simd struct?
bool lclVarIsSIMDType(unsigned varNum)
{
    return lvaGetDesc(varNum)->lvIsSIMDType();
}

// Is this Local node a SIMD local?
bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
{
    return lclVarIsSIMDType(lclVarTree->GetLclNum());
}

// Returns true if the TYP_SIMD locals on stack are aligned at their
// preferred byte boundary specified by getSIMDTypeAlignment().
//
// As per the Intel manual, the preferred alignment for AVX vectors is
// 32 bytes. It is not clear whether the additional stack space used in
// aligning the stack is worth the benefit, so for now we use 16-byte
// alignment for AVX 256-bit vectors with unaligned load/stores to/from
// memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
// existing support for double (8-byte) alignment to 16 or 32 byte
// alignment for frames with local SIMD vars, if that is determined to be
// profitable.
//
// On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
// the prolog has run). This means that in RBP-based frames RBP will be 16-byte
// aligned. For RSP-based frames these are only sometimes aligned, depending
// on the frame size.
//
bool isSIMDTypeLocalAligned(unsigned varNum)
{
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
    if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
    {
        // TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
        int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
        if (alignment <= STACK_ALIGN)
        {
            bool rbpBased;
            int  off = lvaFrameAddress(varNum, &rbpBased);
            // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
            // first instruction of a function. If our frame is RBP based
            // then RBP will always be 16 bytes aligned, so we can simply
            // check the offset.
            if (rbpBased)
            {
                return (off % alignment) == 0;
            }

            // For RSP-based frames the alignment of RSP depends on our
            // locals. rsp+8 is aligned on entry and we just subtract the frame
            // size, so it is not hard to compute. Note that the compiler
            // tries hard to make sure the frame size means RSP will be
            // 16-byte aligned, but for leaf functions without locals (i.e.
            // frameSize = 0) it will not be.
            int frameSize = codeGen->genTotalFrameSize();
            return ((8 - frameSize + off) % alignment) == 0;
        }
    }
#endif // FEATURE_SIMD

    return false;
}

#ifdef DEBUG
// Answer the question: Is a particular ISA supported?
// Use this api when asking the question so that future
// ISA questions can be asked correctly, or when asserting
// support/nonsupport for an instruction set.
bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
    return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
    return false;
#endif
}
#endif // DEBUG

bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;

// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will exactly match the target machine
// on which the function is executed (except for CoreLib, where there are special rules).
bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
    uint64_t isaBit = (1ULL << isa);
    if ((opts.compSupportsISAReported & isaBit) == 0)
    {
        if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
            ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
        ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
    }
    return (opts.compSupportsISAExactly & isaBit) != 0;
#else
    return false;
#endif
}

// Ensure that code will not execute if an instruction set is usable. Call only
// if the instruction set has previously been reported as unusable, but that
// status has not yet been recorded to the AOT compiler.
void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
{
    // use compExactlyDependsOn to capture and record the use of the ISA
    bool isaUsable = compExactlyDependsOn(isa);
    // Assert that the ISA is unusable. If true, this function should never be called.
    assert(!isaUsable);
}

// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will match the target machine if the result is true.
// If the result is false, then the target machine may still have support for the instruction set.
bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
{
    if ((opts.compSupportsISA & (1ULL << isa)) != 0)
    {
        return compExactlyDependsOn(isa);
    }
    else
    {
        return false;
    }
}

// Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
{
    // Report intent to use the ISA to the EE
    compExactlyDependsOn(isa);
    return ((opts.compSupportsISA & (1ULL << isa)) != 0);
}

bool canUseVexEncoding() const
{
#ifdef TARGET_XARCH
    return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
    return false;
#endif
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Compiler                                        XX
XX                                                                           XX
XX   Generic info about the compilation and the method being compiled.      XX
XX   It is responsible for driving the other phases.                        XX
XX   It is also responsible for all the memory management.                  XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
Compiler* InlineeCompiler; // The Compiler instance for the inlinee

InlineResult* compInlineResult; // The result of importing the inlinee method.

bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
bool compJmpOpUsed;            // Does the method do a JMP
bool compLongUsed;             // Does the method use TYP_LONG
bool compFloatingPointUsed;    // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed;         // Does the method do a tailcall
bool compTailPrefixSeen;       // Does the method IL have tail. prefix
bool compLocallocSeen;         // Does the method IL have localloc opcode
bool compLocallocUsed;         // Does the method use localloc.
bool compLocallocOptimized;    // Does the method have an optimized localloc
bool compQmarkUsed;            // Does the method use GT_QMARK/GT_COLON
bool compQmarkRationalized;    // Is it allowed to use a GT_QMARK/GT_COLON node.
bool compHasBackwardJump;      // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
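// A hedged sketch of how the thresholds below might be consulted when deciding
// to fall back to MinOpts (the real policy lives in compSetOptimizationLevel-style
// code and weighs several inputs; this is illustrative only):
//
//   if ((info.compILCodeSize > DEFAULT_MIN_OPTS_CODE_SIZE) ||
//       (opts.instrCount > DEFAULT_MIN_OPTS_INSTR_COUNT))
//   {
//       opts.SetMinOpts(true);
//   }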
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000

// Maximum number of locals before turning off inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512

bool compMinOpts;
bool compMinOptsIsSet;
#ifdef DEBUG
mutable bool compMinOptsIsUsed;

bool MinOpts() const
{
    assert(compMinOptsIsSet);
    compMinOptsIsUsed = true;
    return compMinOpts;
}
bool IsMinOptsSet() const
{
    return compMinOptsIsSet;
}
#else  // !DEBUG
bool MinOpts() const
{
    return compMinOpts;
}
bool IsMinOptsSet() const
{
    return compMinOptsIsSet;
}
#endif // !DEBUG

bool OptimizationDisabled() const
{
    return MinOpts() || compDbgCode;
}
bool OptimizationEnabled() const
{
    return !OptimizationDisabled();
}

void SetMinOpts(bool val)
{
    assert(!compMinOptsIsUsed);
    assert(!compMinOptsIsSet || (compMinOpts == val));
    compMinOpts      = val;
    compMinOptsIsSet = true;
}

// true if the CLFLG_* for an optimization is set.
bool OptEnabled(unsigned optFlag) const
{
    return !!(compFlags & optFlag);
}

#ifdef FEATURE_READYTORUN
bool IsReadyToRun() const
{
    return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
}
#else
bool IsReadyToRun() const
{
    return false;
}
#endif

// Check if the compilation is control-flow guard enabled.
bool IsCFGEnabled() const
{
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
    // On these platforms we assume the register that the target is
    // passed in is preserved by the validator and take care to get the
    // target from the register for the call (even in debug mode).
    static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
    if (JitConfig.JitForceControlFlowGuard())
        return true;

    return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
    // The remaining platforms are not supported and would require some
    // work to support.
    //
    // ARM32:
    //   The ARM32 validator does not preserve any volatile registers
    //   which means we have to take special care to allocate and use a
    //   callee-saved register (reloading the target from memory is a
    //   security issue).
    //
    // x86:
    //   On x86 some VSD calls disassemble the call site and expect an
    //   indirect call which is fundamentally incompatible with CFG.
    //   This would require a different way to pass this information
    //   through.
    //
    return false;
#endif
}

#ifdef FEATURE_ON_STACK_REPLACEMENT
bool IsOSR() const
{
    return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
}
#else
bool IsOSR() const
{
    return false;
}
#endif

// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
// the current logic for frame setup initializes and pushes
// the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
// safely be pushed/popped while the thread is in a preemptive state).
bool ShouldUsePInvokeHelpers()
{
    return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
           jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}

// true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
// prolog/epilog
bool IsReversePInvoke()
{
    return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}

bool compScopeInfo; // Generate the LocalVar info?
bool compDbgCode;   // Generate debugger-friendly code?
bool compDbgInfo;   // Gather debugging info?
bool compDbgEnC;

#ifdef PROFILING_SUPPORTED
bool compNoPInvokeInlineCB;
#else
static const bool compNoPInvokeInlineCB;
#endif

#ifdef DEBUG
bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif

#if defined(DEBUG) && defined(TARGET_XARCH)
bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif                    // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)
bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif                     // defined(DEBUG) && defined(TARGET_X86)

bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen

#ifdef DEBUG
#if defined(TARGET_XARCH)
bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG

#ifdef UNIX_AMD64_ABI
// This flag indicates whether there is a need to align the frame.
// On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
// FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called.
// On AMD64-Unix, there are no such slots. It is possible to have calls in the method with a frame
// size of 0, in which case the frame alignment logic won't kick in. This flag takes care of the
// AMD64-Unix case by remembering that there are calls and making sure the frame alignment logic
// is executed.
bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI

bool compProcedureSplitting; // Separate cold code from hot code

bool genFPorder; // Preserve FP order (operations are non-commutative)
bool genFPopt;   // Can we do frame-pointer-omission optimization?
bool altJit;     // True if we are an altjit and are compiling this method

#ifdef OPT_CONFIG
bool optRepeat; // Repeat optimizer phases k times
#endif

#ifdef DEBUG
bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
bool dspCode;                  // Display native code generated
bool dspEHTable;               // Display the EH table reported to the VM
bool dspDebugInfo;             // Display the Debug info reported to the VM
bool dspInstrs;                // Display the IL instructions intermixed with the native code output
bool dspLines;                 // Display source-code lines intermixed with native code output
bool dmpHex;                   // Display raw bytes in hex of native code output
bool varNames;                 // Display variable names in native code output
bool disAsm;                   // Display native code as it is generated
bool disAsmSpilled;            // Display native code when any register spilling occurs
bool disasmWithGC;             // Display GC info interleaved with disassembly.
bool disDiffable;              // Makes the Disassembly code 'diff-able'
bool disAddr;                  // Display process address next to each instruction in disassembly code
bool disAlignment;             // Display alignment boundaries in disassembly code
bool disAsm2;                  // Display native code after it is generated using external disassembler
bool dspOrder;                 // Display names of each of the methods that we ngen/jit
bool dspUnwind;                // Display the unwind info output
bool dspDiffable;              // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
bool compLongAddress;          // Force using large pseudo instructions for long address
                               // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
bool dspGCtbls;                // Display the GC tables
#endif

bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method

// Default numbers used to perform loop alignment. All the numbers are chosen
// based on experimenting with various benchmarks.
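// (A reading of the defaults that follow, for illustration: with a 32-byte
// boundary and a maximum size of 3 * 32 = 96 bytes, a loop is considered for
// alignment only when its first block's weight is at least 4 and the loop body
// fits in 96 bytes.)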
// Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4

// By default a loop will be aligned at a 32B address boundary to get better
// performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20

// For non-adaptive loop alignment, by default, only align a loop whose size is
// at most 3 times the alignment block size. If the loop is bigger than that, it is most
// likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3

#ifdef DEBUG
// Loop alignment variables

// If set, for non-adaptive alignment, ensure loop jmps do not sit on or cross an alignment boundary.
bool compJitAlignLoopForJcc;
#endif
// For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
unsigned short compJitAlignLoopMaxCodeSize;

// Minimum weight needed for the first block of a loop to make it a candidate for alignment.
unsigned short compJitAlignLoopMinBlockWeight;

// For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
// be done. By default, 32B.
unsigned short compJitAlignLoopBoundary;

// Padding limit to align a loop.
unsigned short compJitAlignPaddingLimit;

// If set, perform adaptive loop alignment that limits number of padding based on loop size.
bool compJitAlignLoopAdaptive;

// If set, tries to hide alignment instructions behind unconditional jumps.
bool compJitHideAlignBehindJmp;

#ifdef LATE_DISASM
bool doLateDisasm; // Run the late disassembler
#endif             // LATE_DISASM

#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
static const bool dspGCtbls = true;
#endif

#ifdef PROFILING_SUPPORTED
// Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
// This option helps make the JIT behave as if it is running under a profiler.
bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED

#if FEATURE_TAILCALL_OPT
// Whether opportunistic or implicit tail call optimization is enabled.
bool compTailCallOpt;
// Whether optimization of transforming a recursive tail call into a loop is enabled.
bool compTailCallLoopOpt;
#endif

#if FEATURE_FASTTAILCALL
// Whether fast tail calls are allowed.
bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL

#if defined(TARGET_ARM64)
// Decision about whether to save FP/LR registers with callee-saved registers (see
// COMPlus_JitSaveFpLrWithCalleeSavedRegisters).
int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)

#ifdef CONFIGURABLE_ARM_ABI
bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
static const bool compUseSoftFP = true;
#else  // !ARM_SOFTFP
static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
} opts;

static bool                s_pAltJitExcludeAssembliesListInitialized;
static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;

#ifdef DEBUG
static bool                s_pJitDisasmIncludeAssembliesListInitialized;
static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;

static bool       s_pJitFunctionFileInitialized;
static MethodSet* s_pJitMethodSet;
#endif // DEBUG

#ifdef DEBUG
// silence warning of cast to greater size. It is easier to silence the warning than to
// construct code the compiler is happy with, and it is safe in this case.
#pragma warning(push)
#pragma warning(disable : 4312)

template <typename T>
T dspPtr(T p)
{
    return (p == ZERO) ? ZERO : (opts.dspDiffable ?
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); // Returns true if the method being compiled has a return buffer. bool compHasRetBuffArg(); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.)
// These should fail by asserting. void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value; we randomize this value when JitStress is enabled.
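// A minimal usage sketch (hypothetical; 'dest' and 'size' are illustrative
// names, and the concrete fill byte may be randomized under JitStress):
//   memset(dest, compGetJitDefaultFill(comp), size);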
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to look up a variable's scopes: indexed by varNum, containing the variable's scope dscs at that index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations;
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to support attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // into account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type-safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered inited only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function.
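// For illustration (a hypothetical C#-like sketch of the rule above; the base
// ctor call is written explicitly, the way it appears in IL):
//   class C : B {
//       int f;
//       C() {
//           f = 1;        // OK: field access on an uninited 'this'
//           base..ctor(); // 'this' is considered inited from this point on
//           M();          // a member call is only verifiable once 'this' is inited
//       }
//   }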
bool verTrackObjCtorInitState; void verInitBBEntryState(BasicBlock* block, EntryState* currentState); // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state. void verSetThisInit(BasicBlock* block, ThisInitState tis); void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); // Merges the current verification state into the entry state of "block", returning false if that merge fails, // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block". bool verMergeEntryStates(BasicBlock* block, bool* changed); void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo bool verIsSDArray(const typeInfo& ti); typeInfo verGetArrayElemType(const typeInfo& ti); typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); bool verIsByRefLike(const typeInfo& ti); bool verIsSafeToReturnByRef(const typeInfo& ti); // generic type variables range over types that satisfy IsBoxable bool verIsBoxable(const typeInfo& ti); void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); bool verCheckTailCallConstraint(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call // on a type parameter? bool speculative // If true, won't throw if verification fails. Instead it will // return false to the caller. // If false, it will throw. ); bool verIsBoxedValueType(const typeInfo& ti); void verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, // is this a "readonly." call? const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)); bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef); typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType); typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType); void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis = false); void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode); void verVerifyThisPtrInitialised(); bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target); #ifdef DEBUG // One line log function. Default level is 0. Increasing it gives you // more log information // levels are currently unused: #define JITDUMP(level,...)
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). LSRA could potentially mark a param as DoNotEnregister after the JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in a register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that checking whether a param is live into an exception handler is // something we may not be able to do here, since GS cookie logic is invoked ahead of liveness // computation. Therefore, for methods with exception handling that need a GS cookie check we // might have to take the conservative approach. // // Possible solution to address case (b) // - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replace param uses by shadow static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overridden by setting the COMPlus_JITInlineSize env variable.
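// For example, a hypothetical local experiment (the value is illustrative only):
//   Windows:  set COMPlus_JITInlineSize=200
//   Unix:     export COMPlus_JITInlineSize=200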
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods nested more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant state (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumed to be called at the end of compilation. Updates the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on the stack; forms the TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Currently kept in this.
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are all required to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the field sequence using the ZeroOffsetFieldMap described above. // // One exception to the above is when "op1" is a GT_LCL_VAR node of type "TYP_REF". // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such a case is handled the same as the default case.
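// A sketch of the zero-offset case described above (names are illustrative only):
// for 'struct S { int f; }' with 'f' at offset 0, the address of 's.f' may be just
// ADDR(LCL_VAR<S>) -- there is no ADD(addr, CNS_INT(0, fieldSeq)) node to carry the
// field sequence for 'f' -- so fgAddFieldSeqForZeroOffset records the sequence
// against the address node itself in the zero-offset field map.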
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
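// A minimal usage sketch of the accessors below (hypothetical caller):
//   CORINFO_FIELD_HANDLE dataField = comp->GetRefanyDataField(); // field 0: the data (byref) field
//   CORINFO_FIELD_HANDLE typeField = comp->GetRefanyTypeField(); // field 1: the type field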
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // UNIX_AMD64_ABI void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (default values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified.
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) // { // m_count++; // return Compiler::WALK_CONTINUE; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(&root, nullptr); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp =
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
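// A minimal visitor sketch in the style of the GenTreeVisitor example above
// (hypothetical names; 'domTree' stands for whatever DomTreeNode array the
// caller has built):
//
//    class BlockCounter final : public DomTreeVisitor<BlockCounter>
//    {
//    public:
//        unsigned m_count;
//
//        BlockCounter(Compiler* compiler, DomTreeNode* domTree)
//            : DomTreeVisitor<BlockCounter>(compiler, domTree), m_count(0)
//        {
//        }
//
//        void PreOrderVisit(BasicBlock* block)
//        {
//            m_count++;
//        }
//    };
//
//    BlockCounter counter(compiler, domTree);
//    counter.WalkTree();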
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for-like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for-like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for-like) extern bool hasMethodLoops; // flag to keep track of whether we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we perform in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iterations for the CSE dataflow extern unsigned CFiterCount; // counts the # of iterations for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts).
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions.
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of an exception handler and is not single def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it is promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function.
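// (Editor's note, not original source text.) The one-bit members below are packed bitfields chosen to
// keep LclVarDsc small; in particular, lvTracked gates whether this local participates in
// liveness/dataflow, and its tracking index is stored in lvVarIndex further below.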
unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candidate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register // candidacy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it a candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in an earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target.
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells that the lclVar is used for a SIMD intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has fewer than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any.
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
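// (Illustrative note, not original source text.) The regNumberSmall members here are 8-bit storage only;
// the public accessors below widen to regNumber and assert that the value round-trips, e.g.
//   varDsc->SetRegNum(REG_STK); assert(varDsc->GetRegNum() == REG_STK);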
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this a SIMD struct that is used for a SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If FEATURE_SIMD is not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal.
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i.e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp().
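 * (Illustrative note, an assumption based on regset.h rather than text from this
 * header: codegen typically obtains one of these via regSet.tmpGetTemp(type) and
 * returns it with regSet.tmpRlsTemp(temp) once the spilled value is reloaded.)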
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use whatever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread.
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initializes the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is an // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing.
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that corresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if it is using the stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1`, which is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
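// (Editor's note, not original source text.) UINT_MAX is the "not a late arg" sentinel; prefer the
// isLateArg()/GetLateArgInx() accessors below over reading _lateArgInx directly, e.g.
//   if (entry->isLateArg()) { unsigned inx = entry->GetLateArgInx(); /* ... */ }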
public: unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a // struct is passed as a scalar type, this is that type. // Note that if a struct is passed by reference, this will still be the struct type. bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar bool needPlace : 1; // True when we must replace this argument with a placeholder node bool isTmp : 1; // True when we set up a temp LclVar for this argument due to size issues with the struct bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of // previous arguments. NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced // to be in certain registers or on the stack, regardless of where they // appear in the arg list. bool isStruct : 1; // True if this is a struct arg bool _isVararg : 1; // True if the argument is in a vararg context. bool passedByRef : 1; // True iff the argument is passed by reference. #if FEATURE_ARG_SPLIT bool _isSplit : 1; // True when this argument is split between the registers and OutArg area #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif CorInfoHFAElemType GetHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _hfaElemKind; #else NOWAY_MSG("GetHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif } void SetHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _hfaElemKind = elemKind; #else NOWAY_MSG("SetHfaElemKind"); #endif } bool isNonStandard() const { return nonStandardArgKind != NonStandardArgKind::None; } // Returns true if the IR node for this non-standard arg is added by fgInitArgInfo. // In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
                            // Computed dynamically during codegen, based on stkSizeBytes and the current
                            // stack level (genStackLevel) when the first stack adjustment is made for
                            // this call.
#endif

#if FEATURE_FIXED_OUT_ARGS
    unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif

    unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
    bool     hasRegArgs;   // true if we have one or more register arguments
    bool     hasStackArgs; // true if we have one or more stack arguments
    bool     argsComplete; // marker for state
    bool     argsSorted;   // marker for state
    bool     needsTemps;   // one or more arguments must be copied to a temp by EvalArgsToTemps

    fgArgTabEntry** argTable; // variable sized array of per-argument descriptions (i.e. argTable[argTableSize])

private:
    void AddArg(fgArgTabEntry* curArgTabEntry);

public:
    fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
    fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);

    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

#ifdef UNIX_AMD64_ABI
    fgArgTabEntry* AddRegArg(unsigned                                                         argNum,
                             GenTree*                                                         node,
                             GenTreeCall::Use*                                                use,
                             regNumber                                                        regNum,
                             unsigned                                                         numRegs,
                             unsigned                                                         byteSize,
                             unsigned                                                         byteAlignment,
                             const bool                                                       isStruct,
                             const bool                                                       isFloatHfa,
                             const bool                                                       isVararg,
                             const regNumber                                                  otherRegNum,
                             const unsigned                                                   structIntRegs,
                             const unsigned                                                   structFloatRegs,
                             const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI

    fgArgTabEntry* AddStkArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             unsigned          numSlots,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

    void RemorphReset();
    void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
    void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);

    void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);

    void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);

    void ArgsComplete();

    void SortArgs();

    void EvalArgsToTemps();

    unsigned ArgCount() const
    {
        return argCount;
    }
    fgArgTabEntry** ArgTable() const
    {
        return argTable;
    }

#if defined(DEBUG_ARG_SLOTS)
    unsigned GetNextSlotNum() const
    {
        return nextSlotNum;
    }
#endif

    unsigned GetNextSlotByteOffset() const
    {
        return nextStackByteOffset;
    }
    bool HasRegArgs() const
    {
        return hasRegArgs;
    }
    bool NeedsTemps() const
    {
        return needsTemps;
    }
    bool HasStackArgs() const
    {
        return hasStackArgs;
    }
    bool AreArgsComplete() const
    {
        return argsComplete;
    }
#if FEATURE_FIXED_OUT_ARGS
    unsigned GetOutArgSize() const
    {
        return outArgSize;
    }
    void SetOutArgSize(unsigned newVal)
    {
        outArgSize = newVal;
    }
#endif // FEATURE_FIXED_OUT_ARGS

#if defined(UNIX_X86_ABI)
    void ComputeStackAlignment(unsigned curStackLevelInBytes)
    {
        padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
    }

    unsigned GetStkAlign() const
    {
        return padStkAlign;
    }
    void SetStkSizeBytes(unsigned newStkSizeBytes)
    {
        stkSizeBytes = newStkSizeBytes;
    }
    unsigned GetStkSizeBytes() const
    {
        return stkSizeBytes;
    }
    bool IsStkAlignmentDone() const
    {
        return alignmentDone;
    }
    void SetStkAlignmentDone()
    {
        alignmentDone = true;
    }
#endif // defined(UNIX_X86_ABI)

    // Get the fgArgTabEntry for the arg at position argNum.
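    // A minimal usage sketch (hypothetical caller; assumes the call's fgArgInfo has
    // already been built by fgMorphArgs):
    //   fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(0, /* reMorphing */ false);
    //   GenTree*       node  = entry->GetNode();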
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
                                // morphNum)
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.
    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree*             op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //
    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating format of EH table
    //

    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // next unused EHNodeDsc in the preallocated pool of EH nodes.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                // inclusive).
        pEHNodeDsc ehnNext;  // next (non-nested) block in sequential order
        pEHNodeDsc ehnChild; // leftmost nested block
        union {
            pEHNodeDsc ehnTryNode;     // for filters and handlers, the corresponding try node
            pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
        };
        pEHNodeDsc ehnFilterNode; // the filter node, if this is a try node that has a filter; otherwise 0
        pEHNodeDsc ehnEquivalent; // if blockType=tryNode, the try node with the same start and end offsets, if any

        void ehnSetTryNodeType()
        {
            ehnBlockType = TryNode;
        }
        void ehnSetFilterNodeType()
        {
            ehnBlockType = FilterNode;
        }
        void ehnSetHandlerNodeType()
        {
            ehnBlockType = HandlerNode;
        }
        void ehnSetFinallyNodeType()
        {
            ehnBlockType = FinallyNode;
        }
        void ehnSetFaultNodeType()
        {
            ehnBlockType = FaultNode;
        }

        bool ehnIsTryBlock()
        {
            return ehnBlockType == TryNode;
        }
        bool ehnIsFilterBlock()
        {
            return ehnBlockType == FilterNode;
        }
        bool ehnIsHandlerBlock()
        {
            return ehnBlockType == HandlerNode;
        }
        bool ehnIsFinallyBlock()
        {
            return ehnBlockType == FinallyNode;
        }
        bool ehnIsFaultBlock()
        {
            return ehnBlockType == FaultNode;
        }

        // returns true if there is any overlap between the two nodes
        static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2)
        {
            if (node1->ehnStartOffset < node2->ehnStartOffset)
            {
                return (node1->ehnEndOffset >= node2->ehnStartOffset);
            }
            else
            {
                return (node1->ehnStartOffset <= node2->ehnEndOffset);
            }
        }

        // fails with BADCODE if inner is not completely nested inside outer
        static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer)
        {
            return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset));
        }
    };

    //-------------------------------------------------------------------------
    // Exception handling functions
    //

#if !defined(FEATURE_EH_FUNCLETS)

    bool ehNeedsShadowSPslots()
    {
        return (info.compXcptnsCount || opts.compDbgEnC);
    }

    // 0 for methods with no EH
    // 1 for methods with non-nested EH, or where only the try blocks are nested
    // 2 for a method with a catch within a catch
    // etc.
    unsigned ehMaxHndNestingCount;

#endif // !FEATURE_EH_FUNCLETS

    static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
    static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);

    bool bbInCatchHandlerILRange(BasicBlock* blk);
    bool bbInFilterILRange(BasicBlock* blk);
    bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk);
    bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk);
    bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk);
    bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk);
    unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo);

    unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex);
    unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex);

    // Returns true if "block" is the start of a try region.
    bool bbIsTryBeg(BasicBlock* block);

    // Returns true if "block" is the start of a handler or filter region.
    bool bbIsHandlerBeg(BasicBlock* block);

    // Returns true iff "block" is where control flows if an exception is raised in the
    // try region, and sets "*regionIndex" to the index of the try for the handler.
    // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first
    // block of the filter, but not for the filter's handler.
    bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);

    bool ehHasCallableHandlers();

    // Return the EH descriptor for the given region index.
    EHblkDsc* ehGetDsc(unsigned regionIndex);

    // Return the EH index given a region descriptor.
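    // (Illustrative invariant: for any valid region index i, ehGetIndex(ehGetDsc(i)) == i.)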
    unsigned ehGetIndex(EHblkDsc* ehDsc);

    // Return the EH descriptor index of the enclosing try, for the given region index.
    unsigned ehGetEnclosingTryIndex(unsigned regionIndex);

    // Return the EH descriptor index of the enclosing handler, for the given region index.
    unsigned ehGetEnclosingHndIndex(unsigned regionIndex);

    // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
    // block is not in a 'try' region).
    EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);

    // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
    // if this block is not in a filter or handler region).
    EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);

    // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
    // nullptr if this block's exceptions propagate to caller).
    EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);

    EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
    EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
    bool ehIsBlockEHLast(BasicBlock* block);

    bool ehBlockHasExnFlowDsc(BasicBlock* block);

    // Return the region index of the most nested EH region this block is in.
    unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);

    // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
    unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);

    // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
    // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
    // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
    // (It can never be a filter.)
    unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);

    // A block has been deleted. Update the EH table appropriately.
    void ehUpdateForDeletedBlock(BasicBlock* block);

    // Determine whether a block can be deleted while preserving the EH normalization rules.
    bool ehCanDeleteEmptyBlock(BasicBlock* block);

    // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
    void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);

    // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
    // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
    // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
    // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
    // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
    // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if it lives in the handler region.
    // (It never lives in a filter.)
    unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);

    // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex'
    // region's handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
    // (nullptr if the last block is the last block in the program).
    // Precondition: 'finallyIndex' is the EH region of a try/finally clause.
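    // A usage sketch (hypothetical caller) that walks the returned range:
    //   BasicBlock* beg;
    //   BasicBlock* end;
    //   ehGetCallFinallyBlockRange(finallyIndex, &beg, &end);
    //   for (BasicBlock* block = beg; block != end; block = block->bbNext)
    //   {
    //       /* inspect BBJ_CALLFINALLY blocks here */
    //   }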
    void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);

#ifdef DEBUG
    // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
    // 'true' if the BBJ_CALLFINALLY is in the correct EH region.
    bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG

#if defined(FEATURE_EH_FUNCLETS)
    // Do we need a PSPSym in the main function? For codegen purposes, we only need one
    // if there is a filter that protects a region with a nested EH clause (such as a
    // try/catch nested in the 'try' body of a try/filter/filter-handler). See
    // genFuncletProlog() for more details. However, the VM seems to use it for more
    // purposes, maybe including debugging. Until we are sure otherwise, always create
    // a PSPSym for functions with any EH.
    bool ehNeedsPSPSym() const
    {
#ifdef TARGET_X86
        return false;
#else  // TARGET_X86
        return compHndBBtabCount > 0;
#endif // TARGET_X86
    }

    bool     ehAnyFunclets();  // Are there any funclets in this function?
    unsigned ehFuncletCount(); // Return the count of funclets in the function

    unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks

#else  // !FEATURE_EH_FUNCLETS

    bool ehAnyFunclets()
    {
        return false;
    }
    unsigned ehFuncletCount()
    {
        return 0;
    }

    unsigned bbThrowIndex(BasicBlock* blk)
    {
        return blk->bbTryIndex;
    } // Get the index to use as the cache key for sharing throw blocks

#endif // !FEATURE_EH_FUNCLETS

    // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
    // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the
    // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
    // for example, we want to consider that predecessor the immediate dominator of the catch clause start block,
    // so it's convenient to also consider it a predecessor.)
    flowList* BlockPredsWithEH(BasicBlock* blk);

    // This table is useful for memoization of the method above.
    typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
    BlockToFlowListMap* m_blockToEHPreds;
    BlockToFlowListMap* GetBlockToEHPreds()
    {
        if (m_blockToEHPreds == nullptr)
        {
            m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
        }
        return m_blockToEHPreds;
    }

    void* ehEmitCookie(BasicBlock* block);
    UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);

    EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);

    EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);

    EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);

    EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);

    void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);

    void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);

    void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);

    void fgSkipRmvdBlocks(EHblkDsc* handlerTab);

    void fgAllocEHTable();

    void fgRemoveEHTableEntry(unsigned XTnum);

#if defined(FEATURE_EH_FUNCLETS)

    EHblkDsc* fgAddEHTableEntry(unsigned XTnum);

#endif // FEATURE_EH_FUNCLETS

#if !FEATURE_EH
    void fgRemoveEH();
#endif // !FEATURE_EH

    void fgSortEHTable();

    // Causes the EH table to obey some well-formedness conditions, by inserting
    // empty BB's when necessary:
    // * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
                                GenTree*  op1,
                                bool      fromUnsigned,
                                var_types castType);

    GenTreeAllocObj* gtNewAllocObjNode(
        unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);

    GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);

    GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);

    GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);

    //------------------------------------------------------------------------
    // Other GenTree functions

    GenTree* gtClone(GenTree* tree, bool complexOK = false);

    // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
    // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
    // IntCnses with value `deepVarVal`.
    GenTree* gtCloneExpr(
        GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);

    // Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
    // `varNum` to int constants with value `varVal`.
    GenTree* gtCloneExpr(GenTree*     tree,
                         GenTreeFlags addFlags = GTF_EMPTY,
                         unsigned     varNum   = BAD_VAR_NUM,
                         int          varVal   = 0)
    {
        return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
    }

    Statement* gtCloneStmt(Statement* stmt)
    {
        GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
        return gtNewStmt(exprClone, stmt->GetDebugInfo());
    }

    // Internal helper for cloning a call
    GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
                                       GenTreeFlags addFlags   = GTF_EMPTY,
                                       unsigned     deepVarNum = BAD_VAR_NUM,
                                       int          deepVarVal = 0);

    // Create copy of an inline or guarded devirtualization candidate tree.
    GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);

    void gtUpdateSideEffects(Statement* stmt, GenTree* tree);

    void gtUpdateTreeAncestorsSideEffects(GenTree* tree);

    void gtUpdateStmtSideEffects(Statement* stmt);

    void gtUpdateNodeSideEffects(GenTree* tree);

    void gtUpdateNodeOperSideEffects(GenTree* tree);

    void gtUpdateNodeOperSideEffectsPost(GenTree* tree);

    // Returns "true" iff the complexity (not formally defined, but first interpretation
    // is # of nodes in subtree) of "tree" is greater than "limit".
    // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
    // before they have been set.)
    bool gtComplexityExceeds(GenTree** tree, unsigned limit);

    GenTree* gtReverseCond(GenTree* tree);

    static bool gtHasRef(GenTree* tree, ssize_t lclNum);

    bool gtHasLocalsWithAddrOp(GenTree* tree);

    unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
    unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);

    void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);

#ifdef DEBUG
    unsigned gtHashValue(GenTree* tree);

    GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif

    void gtPrepareCost(GenTree* tree);
    bool gtIsLikelyRegVar(GenTree* tree);

    // Returns true iff the secondNode can be swapped with firstNode.
    bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);

    // Given an address expression, compute its costs and addressing mode opportunities,
    // and mark addressing mode candidates as GTF_DONT_CSE.
    // TODO-Throughput - Consider actually instantiating these early, to avoid
    // having to re-run the algorithm that looks for them (might also improve CQ).
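    // An illustrative call (hypothetical values; 'addr' is some address expression):
    //   int costEx = 0;
    //   int costSz = 0;
    //   bool marked = gtMarkAddrMode(addr, &costEx, &costSz, TYP_INT);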
    bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);

    unsigned gtSetEvalOrder(GenTree* tree);

    void gtSetStmtInfo(Statement* stmt);

    // Returns "true" iff "node" has any of the side effects in "flags".
    bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags);

    // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
    bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags);

    // Prepends 'expr' in front of 'list'
    //    'list' will typically start off as 'nullptr'
    //    when 'list' is non-null a GT_COMMA node is used to insert 'expr'
    GenTree* gtBuildCommaList(GenTree* list, GenTree* expr);

    void gtExtractSideEffList(GenTree*     expr,
                              GenTree**    pList,
                              GenTreeFlags flags      = GTF_SIDE_EFFECT,
                              bool         ignoreRoot = false);

    GenTree* gtGetThisArg(GenTreeCall* call);

    // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
    // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
    // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
    // the given "fldHnd", is such an object pointer.
    bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);

    // Return true if call is a recursive call; return false otherwise.
    // Note when inlining, this looks for calls back to the root method.
    bool gtIsRecursiveCall(GenTreeCall* call)
    {
        return gtIsRecursiveCall(call->gtCallMethHnd);
    }

    bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle)
    {
        return (callMethodHandle == impInlineRoot()->info.compMethodHnd);
    }

    //-------------------------------------------------------------------------

    GenTree* gtFoldExpr(GenTree* tree);
    GenTree* gtFoldExprConst(GenTree* tree);
    GenTree* gtFoldExprSpecial(GenTree* tree);
    GenTree* gtFoldBoxNullable(GenTree* tree);
    GenTree* gtFoldExprCompare(GenTree* tree);
    GenTree* gtCreateHandleCompare(genTreeOps             oper,
                                   GenTree*               op1,
                                   GenTree*               op2,
                                   CorInfoInlineTypeCheck typeCheckInliningResult);
    GenTree* gtFoldExprCall(GenTreeCall* call);
    GenTree* gtFoldTypeCompare(GenTree* tree);
    GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2);

    // Options to control behavior of gtTryRemoveBoxUpstreamEffects
    enum BoxRemovalOptions
    {
        BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree
        BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree
        BR_REMOVE_BUT_NOT_NARROW,              // remove effects, return original source tree
        BR_DONT_REMOVE,                        // check if removal is possible, return copy source tree
        BR_DONT_REMOVE_WANT_TYPE_HANDLE,       // check if removal is possible, return type handle tree
        BR_MAKE_LOCAL_COPY                     // revise box to copy to temp local and return local's address
    };

    GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW);
    GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);

    //-------------------------------------------------------------------------
    // Get the handle, if any.
    CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
    // Get the handle, and assert if not found.
    CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
    // Get the handle for a ref type.
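    // e.g. (illustrative):
    //   bool                 isExact   = false;
    //   bool                 isNonNull = false;
    //   CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);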
    CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
    // Get the class handle for a helper call
    CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
    // Get the element handle for an array of ref type.
    CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
    // Get a class handle from a helper call argument
    CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
    // Get the class handle for a field
    CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
    // Check if this tree is a gc static base helper call
    bool gtIsStaticGCBaseHelperCall(GenTree* tree);

    //-------------------------------------------------------------------------
    // Functions to display the trees

#ifdef DEBUG
    void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);

    void gtDispConst(GenTree* tree);
    void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
    void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
    unsigned gtDispMultiRegCount(GenTree* tree);
#endif
    void gtDispRegVal(GenTree* tree);
    void gtDispZeroFieldSeq(GenTree* tree);
    void gtDispVN(GenTree* tree);
    void gtDispCommonEndLine(GenTree* tree);

    enum IndentInfo
    {
        IINone,
        IIArc,
        IIArcTop,
        IIArcBottom,
        IIEmbedded,
        IIError,
        IndentInfoCount
    };
    void gtDispChild(GenTree*             child,
                     IndentStack*         indentStack,
                     IndentInfo           arcType,
                     _In_opt_ const char* msg     = nullptr,
                     bool                 topOnly = false);
    void gtDispTree(GenTree*             tree,
                    IndentStack*         indentStack = nullptr,
                    _In_opt_ const char* msg         = nullptr,
                    bool                 topOnly     = false,
                    bool                 isLIR       = false);
    void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
    int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
    char* gtGetLclVarName(unsigned lclNum);
    void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
    void gtDispLclVarStructType(unsigned lclNum);
    void gtDispClassLayout(ClassLayout* layout, var_types type);
    void gtDispILLocation(const ILLocation& loc);
    void gtDispStmt(Statement* stmt, const char* msg = nullptr);
    void gtDispBlockStmts(BasicBlock* block);
    void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
    void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
    void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
    void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
    void gtDispFieldSeq(FieldSeqNode* pfsn);

    void gtDispRange(LIR::ReadOnlyRange const& range);

    void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);

    void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif

    // For tree walks

    enum fgWalkResult
    {
        WALK_CONTINUE,
        WALK_SKIP_SUBTREES,
        WALK_ABORT
    };
    struct fgWalkData;
    typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
    typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);

    static fgWalkPreFn gtMarkColonCond;
    static fgWalkPreFn gtClearColonCond;

    struct FindLinkData
    {
        GenTree*  nodeToFind;
        GenTree** result;
        GenTree*  parent;
    };

    FindLinkData gtFindLink(Statement* stmt, GenTree* node);
    bool gtHasCatchArg(GenTree* tree);

    typedef ArrayStack<GenTree*> GenTreeStack;

    static bool gtHasCallOnStack(GenTreeStack* parentStack);

    //=========================================================================
    // BasicBlock functions
#ifdef DEBUG
    // This is a debug flag we will use to assert when creating block during codegen
    // as this interferes with procedure splitting. If you know what you're doing, set
    // it to true before creating the block. (DEBUG only)
    bool fgSafeBasicBlockCreation;
#endif

    BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);

    void placeLoopAlignInstructions();

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           LclVarsInfo                                     XX
    XX                                                                           XX
    XX   The variables to be used by the code generator.                         XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

    //
    // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
    // be placed in the stack frame and its fields must be laid out sequentially.
    //
    // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
    //  a local variable that can be enregistered or placed in the stack frame.
    //  The fields do not need to be laid out sequentially
    //
    enum lvaPromotionType
    {
        PROMOTION_TYPE_NONE,        // The struct local is not promoted
        PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
                                    //   and its field locals are independent of its parent struct local.
        PROMOTION_TYPE_DEPENDENT    // The struct local is promoted,
                                    //   but its field locals depend on its parent struct local.
    };

    /*****************************************************************************/

    enum FrameLayoutState
    {
        NO_FRAME_LAYOUT,
        INITIAL_FRAME_LAYOUT,
        PRE_REGALLOC_FRAME_LAYOUT,
        REGALLOC_FRAME_LAYOUT,
        TENTATIVE_FRAME_LAYOUT,
        FINAL_FRAME_LAYOUT
    };

public:
    RefCountState lvaRefCountState; // Current local ref count state

    bool lvaLocalVarRefCounted() const
    {
        return lvaRefCountState == RCS_NORMAL;
    }

    bool     lvaTrackedFixed; // true: We cannot add new 'tracked' variable
    unsigned lvaCount;        // total number of locals, which includes function arguments,
                              // special arguments, IL local variables, and JIT temporary variables

    LclVarDsc* lvaTable;    // variable descriptor table
    unsigned   lvaTableCnt; // lvaTable size (>= lvaCount)

    unsigned lvaTrackedCount;             // actual # of locals being tracked
    unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked

#ifdef DEBUG
    VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
    VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
    VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables

    unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
                          // If that changes, this changes. VarSets from different epochs
                          // cannot be meaningfully combined.

    unsigned GetCurLVEpoch()
    {
        return lvaCurEpoch;
    }

    // reverse map of tracked number to var number
    unsigned  lvaTrackedToVarNumSize;
    unsigned* lvaTrackedToVarNum;

#if DOUBLE_ALIGN
#ifdef DEBUG
    // # of procs compiled with a double-aligned stack
    static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif

    // Getters and setters for address-exposed and do-not-enregister local var properties.
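    // e.g. (illustrative): a pass that must keep an address-taken local on the frame
    // can query it as
    //   if (lvaVarAddrExposed(lclNum)) { /* the local cannot be enregistered */ }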
    bool lvaVarAddrExposed(unsigned varNum) const;
    void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
    void lvaSetVarLiveInOutOfHandler(unsigned varNum);
    bool lvaVarDoNotEnregister(unsigned varNum);

    void lvSetMinOptsDoNotEnreg();

    bool lvaEnregEHVars;
    bool lvaEnregMultiRegVars;

    void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));

    unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
    unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
                                      // arguments
#endif                                // TARGET_X86

    unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
    unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
    unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
    unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
                             // that tracks whether the lock has been taken

    unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
                         // However, if there is a "ldarga 0" or "starg 0" in the IL,
                         // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.

    unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
                                        // in case there are multiple BBJ_RETURN blocks in the inlinee
                                        // or if the inlinee has GC ref locals.

#if FEATURE_FIXED_OUT_ARGS
    unsigned            lvaOutgoingArgSpaceVar;  // dummy TYP_LCLBLK var for fixed outgoing argument space
    PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif                                           // FEATURE_FIXED_OUT_ARGS

    static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
    {
        return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
    }

    // Variable representing the return address. The helper-based tailcall
    // mechanism passes the address of the return address to a runtime helper
    // where it is used to detect tail-call chains.
    unsigned lvaRetAddrVar;

#if defined(DEBUG) && defined(TARGET_XARCH)

    unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.

#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)

    unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.

#endif // defined(DEBUG) && defined(TARGET_X86)

    bool lvaGenericsContextInUse;

    bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
                                      // CORINFO_GENERICS_CTXT_FROM_THIS?
    bool lvaReportParamTypeArg();     // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?

    //-------------------------------------------------------------------------
    // All these frame offsets are inter-related and must be kept in sync

#if !defined(FEATURE_EH_FUNCLETS)
    // This is used for the callable handlers
    unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif                            // !FEATURE_EH_FUNCLETS

    int lvaCachedGenericContextArgOffs;
    int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
                                            // THIS pointer

#ifdef JIT32_GCENCODER

    unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc

#endif // JIT32_GCENCODER

    unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper

    // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
    //  after the reg predict we will use a computed maxTmpSize
    //  which is based upon the number of spill temps predicted by reg predict
    //  All this is necessary because if we under-estimate the size of the spill
    //  temps we could fail when encoding instructions that reference stack offsets for ARM.
    //
    // Pre codegen max spill temp size.
    static const unsigned MAX_SPILL_TEMP_SIZE = 24;

    //-------------------------------------------------------------------------

    unsigned lvaGetMaxSpillTempSize();
#ifdef TARGET_ARM
    bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // TARGET_ARM
    void lvaAssignFrameOffsets(FrameLayoutState curState);
    void lvaFixVirtualFrameOffsets();
    void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc);
    void lvaUpdateArgsWithInitialReg();
    void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
    int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
#else  // !UNIX_AMD64_ABI
    int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
    void lvaAssignVirtualFrameOffsetsToLocals();
    int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef TARGET_AMD64
    // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
    bool lvaIsCalleeSavedIntRegCountEven();
#endif
    void lvaAlignFrame();
    void lvaAssignFrameOffsetsToPromotedStructs();
    int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);

#ifdef DEBUG
    void lvaDumpRegLocation(unsigned lclNum);
    void lvaDumpFrameLocation(unsigned lclNum);
    void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
    void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
                                                                    // layout state defined by lvaDoneFrameLayout
#endif

    // Limit frame size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
    // to avoid bugs from borderline cases.
#define MAX_FrameSize 0x3FFFFFFF

    void lvaIncrementFrameSize(unsigned size);

    unsigned lvaFrameSize(FrameLayoutState curState);

    // Returns the caller-SP-relative offset for the given SP-relative or FP-relative offset
    // ('isFpBased' selects which one 'offs' is).
    int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;

    // Returns the caller-SP-relative offset for the local variable "varNum."
    int lvaGetCallerSPRelativeOffset(unsigned varNum);

    // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
    int lvaGetSPRelativeOffset(unsigned varNum);
    int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
    int lvaGetInitialSPRelativeOffset(unsigned varNum);

    // True if this is an OSR compilation and this local is potentially
    // located on the original method stack frame.
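    // (Illustrative note: frame layout must account for such locals, since their home
    // may be on the original method's frame rather than on the OSR frame.)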
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
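// Illustrative sketch (not part of the original header): on Windows x64 a
// 5-byte struct parameter is passed by reference, so a phase might ask:
//
//   if (lvaIsImplicitByRefLocal(argNum))
//   {
//       // appearances of this local must eventually be rewritten as
//       // indirections off the incoming pointer
//   }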
bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    LclVarDsc* varDsc = lvaGetDesc(varNum);
    if (varDsc->lvIsImplicitByRef)
    {
        assert(varDsc->lvIsParam);
        assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
        return true;
    }
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
    return false;
}

// Returns true if this local var is a multireg struct
bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);

// If the local is a TYP_STRUCT, get/set a class handle describing it
CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
void lvaSetStructUsedAsVarArg(unsigned varNum);

// If the local is TYP_REF, set or update the associated class information.
void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);

#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct

// Info about struct type fields.
struct lvaStructFieldInfo
{
    CORINFO_FIELD_HANDLE fldHnd;
    unsigned char        fldOffset;
    unsigned char        fldOrdinal;
    var_types            fldType;
    unsigned             fldSize;
    CORINFO_CLASS_HANDLE fldTypeHnd;

    lvaStructFieldInfo()
        : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
    {
    }
};

// Info about a struct type, instances of which may be candidates for promotion.
struct lvaStructPromotionInfo
{
    CORINFO_CLASS_HANDLE typeHnd;
    bool                 canPromote;
    bool                 containsHoles;
    bool                 customLayout;
    bool                 fieldsSorted;
    unsigned char        fieldCnt;
    lvaStructFieldInfo   fields[MAX_NumOfFieldsInPromotableStruct];

    lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
        : typeHnd(typeHnd)
        , canPromote(false)
        , containsHoles(false)
        , customLayout(false)
        , fieldsSorted(false)
        , fieldCnt(0)
    {
    }
};

struct lvaFieldOffsetCmp
{
    bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
};

// This class is responsible for checking validity and profitability of struct promotion.
// If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
// necessary information for fgMorphStructField to use.
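// Illustrative sketch of how the helper below is typically driven (the real
// call sites live in the promotion phase, and TryPromoteStructVar re-checks
// legality and profitability internally):
//
//   if (structPromotionHelper->CanPromoteStructType(typeHnd))
//   {
//       structPromotionHelper->TryPromoteStructVar(lclNum);
//   }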
class StructPromotionHelper
{
public:
    StructPromotionHelper(Compiler* compiler);

    bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd);
    bool TryPromoteStructVar(unsigned lclNum);
    void Clear()
    {
        structPromotionInfo.typeHnd = NO_CLASS_HANDLE;
    }

#ifdef DEBUG
    void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType);
#endif // DEBUG

private:
    bool CanPromoteStructVar(unsigned lclNum);
    bool ShouldPromoteStructVar(unsigned lclNum);
    void PromoteStructVar(unsigned lclNum);
    void SortStructFields();

    bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo);

    lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal);
    bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo);

private:
    Compiler*              compiler;
    lvaStructPromotionInfo structPromotionInfo;

#ifdef DEBUG
    typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types>
        RetypedAsScalarFieldsMap;
    RetypedAsScalarFieldsMap retypedFieldsMap;
#endif // DEBUG
};

StructPromotionHelper* structPromotionHelper;

unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset);
lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetPromotionType(unsigned varNum);
lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);

#if defined(FEATURE_SIMD)
bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
{
    assert(varDsc->lvType == TYP_SIMD12);
    assert(varDsc->lvExactSize == 12);

#if defined(TARGET_64BIT)
    assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
#endif // defined(TARGET_64BIT)

    // We make local variable SIMD12 types 16 bytes instead of just 12.
    // lvSize() will return 16 bytes for SIMD12, even for fields.
    // However, we can't do that mapping if the var is a dependently promoted struct field.
    // Such a field must remain its exact size within its parent struct unless it is a single
    // field *and* it is the only field in a struct of 16 bytes.
    if (varDsc->lvSize() != 16)
    {
        return false;
    }
    if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
    {
        LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl);
        return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16);
    }
    return true;
}
#endif // defined(FEATURE_SIMD)

unsigned lvaGSSecurityCookie; // LclVar number
bool     lvaTempsHaveLargerOffsetThanVars();

// Returns "true" iff local variable "lclNum" is in SSA form.
bool lvaInSsa(unsigned lclNum)
{
    assert(lclNum < lvaCount);
    return lvaTable[lclNum].lvInSsa;
}

unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX

#if defined(FEATURE_EH_FUNCLETS)
unsigned lvaPSPSym; // variable representing the PSPSym
#endif

InlineInfo*     impInlineInfo; // Only present for inlinees
InlineStrategy* m_inlineStrategy;

InlineContext* compInlineContext; // Always present

// The Compiler* that is the root of the inlining tree of which "this" is a member.
Compiler* impInlineRoot();

#if defined(DEBUG) || defined(INLINE_DATA)
unsigned __int64 getInlineCycleCount()
{
    return m_compCycles;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)

bool fgNoStructPromotion;      // Set to TRUE to turn off struct promotion for this method.
bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
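// Illustrative sketch: a phase performing promotion would typically early-out
// when these flags are set, e.g.:
//
//   if (fgNoStructPromotion)
//   {
//       return; // struct promotion disabled for this method
//   }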
//=========================================================================
//                          PROTECTED
//=========================================================================

protected:
//---------------- Local variable ref-counting ----------------------------

void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute);
bool IsDominatedByExceptionalEntry(BasicBlock* block);
void SetVolatileHint(LclVarDsc* varDsc);

// Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
SsaDefArray<SsaMemDef> lvMemoryPerSsaData;

public:
// Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum)
{
    return lvMemoryPerSsaData.GetSsaDef(ssaNum);
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

private:
// For prefixFlags
enum
{
    PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
    PREFIX_TAILCALL_IMPLICIT =
        0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
    PREFIX_TAILCALL_STRESS =
        0x00000100, // call has no "tail" IL prefix but is treated as explicit because of tail call stress
    PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS),
    PREFIX_VOLATILE    = 0x00001000,
    PREFIX_UNALIGNED   = 0x00010000,
    PREFIX_CONSTRAINED = 0x00100000,
    PREFIX_READONLY    = 0x01000000
};

static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix);
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp);
static bool impOpcodeIsCallOpcode(OPCODE opcode);

public:
void impInit();
void impImport();

CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
CORINFO_CLASS_HANDLE impGetTypeHandleClass();
CORINFO_CLASS_HANDLE impGetStringClass();
CORINFO_CLASS_HANDLE impGetObjectClass();

// Returns underlying type of handles returned by ldtoken instruction
var_types GetRuntimeHandleUnderlyingType()
{
    // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes
    return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF;
}

void impDevirtualizeCall(GenTreeCall*            call,
                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
                         CORINFO_METHOD_HANDLE*  method,
                         unsigned*               methodFlags,
                         CORINFO_CONTEXT_HANDLE* contextHandle,
                         CORINFO_CONTEXT_HANDLE* exactContextHandle,
                         bool                    isLateDevirtualization,
                         bool                    isExplicitTailCall,
                         IL_OFFSET               ilOffset = BAD_IL_OFFSET);

//=========================================================================
//                          PROTECTED
//=========================================================================

protected:
//-------------------- Stack manipulation ---------------------------------

unsigned impStkSize; // Size of the full stack

#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack

struct SavedStack // used to save/restore stack contents.
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSWAR(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSIMD(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic 
intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types 
impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. 
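// Illustrative sketch of the typical flow (assuming an IL offset that must be
// reported; impCurStmtOffsSet is declared just below, and impAppendTree and
// CHECK_SPILL_ALL are declared above):
//
//   impCurStmtOffsSet(opcodeOffs);  // establish the debug info to report
//   // ... build trees for the instruction ...
//   impAppendTree(tree, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // stamps the DI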
DebugInfo impCurStmtDI;
DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall);
void impCurStmtOffsSet(IL_OFFSET offs);

void impNoteBranchOffs();

unsigned impInitBlockLineInfo();

bool impIsThis(GenTree* obj);
bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsAnySTLOC(OPCODE opcode)
{
    return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) ||
            ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
}

GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr);

bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const;

GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0);

//---------------- Spilling the importer stack ----------------------------

// The maximum number of bytes of IL processed without clean stack state.
// It allows us to limit the maximum tree size and depth.
static const unsigned MAX_TREE_SIZE = 200;
bool impCanSpillNow(OPCODE prevOpcode);

struct PendingDsc
{
    PendingDsc*   pdNext;
    BasicBlock*   pdBB;
    SavedStack    pdSavedStack;
    ThisInitState pdThisPtrInit;
};

PendingDsc* impPendingList; // list of BBs currently waiting to be imported.
PendingDsc* impPendingFree; // Freed up dscs that can be reused

// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
JitExpandArray<BYTE> impPendingBlockMembers;

// Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary).
// Operates on the map in the top-level ancestor.
BYTE impGetPendingBlockMember(BasicBlock* blk)
{
    return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd());
}

// Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary).
// Operates on the map in the top-level ancestor.
void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
{
    impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val);
}

bool impCanReimport;

bool impSpillStackEntry(unsigned level,
                        unsigned varNum
#ifdef DEBUG
                        ,
                        bool        bAssertOnRecursion,
                        const char* reason
#endif
                        );

void impSpillStackEnsure(bool spillLeaves = false);
void impEvalSideEffects();
void impSpillSpecialSideEff();
void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason));
void impSpillValueClasses();
void impSpillEvalStack();
static fgWalkPreFn impFindValueClasses;
void impSpillLclRefs(ssize_t lclNum);

BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter);
bool impBlockIsInALoop(BasicBlock* block);
void impImportBlockCode(BasicBlock* block);

void impReimportMarkBlock(BasicBlock* block);
void impReimportMarkSuccessors(BasicBlock* block);

void impVerifyEHBlock(BasicBlock* block, bool isTryStart);

void impImportBlockPending(BasicBlock* block);

// Similar to impImportBlockPending, but assumes that block has already been imported once and is being
// reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState
// for the block, but instead, just re-uses the block's existing EntryState.
void impReimportBlockPending(BasicBlock* block);

var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2);

void impImportBlock(BasicBlock* block);

// Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values
// on the stack to local variables (the "spill temp" variables). The successor blocks will assume that
// their incoming stack contents are in those locals. This requires "block" and its successors to agree on
// the variables that will be used -- and for all the predecessors of those successors, and the
// successors of those predecessors, etc. Call such a set of blocks closed under alternating
// successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the
// clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill
// temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series
// of local variable numbers, so we represent them with the base local variable number), returns that.
// Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of
// which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps
// chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending
// on which kind of member of the clique the block is).
unsigned impGetSpillTmpBase(BasicBlock* block);

// Assumes that "block" is a basic block that completes with a non-empty stack. We have previously
// assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks
// will assume that their incoming stack contents are in those locals. This requires "block" and its
// successors to agree on the variables and their types that will be used. The CLI spec allows implicit
// conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can
// push an int and another can push a native int. For 64-bit we have chosen to implement this by typing
// the "spill temp" as native int, and then importing (or re-importing as needed) so that all the
// predecessors in the "spill clique" push a native int (sign-extending if needed), and all the
// successors receive a native int. Similarly float and double are unified to double.
// This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark
// blocks for re-importation as appropriate (both successors, so they get the right incoming type, and
// predecessors, so they insert an upcast if needed).
void impReimportSpillClique(BasicBlock* block);
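// Illustrative sketch: when importing detects such an int/native-int mismatch
// at a clique boundary, the recovery is roughly:
//
//   impReimportSpillClique(block);  // mark clique members for re-importation
//   impReimportBlockPending(block); // re-queue using the existing EntryState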
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
// *** Access to these will need to be locked in a parallel compiler.
JitExpandArray<BYTE> impSpillCliquePredMembers;
JitExpandArray<BYTE> impSpillCliqueSuccMembers;

enum SpillCliqueDir
{
    SpillCliquePred,
    SpillCliqueSucc
};

// Abstract class for receiving a callback while walking a spill clique
class SpillCliqueWalker
{
public:
    virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
};

// This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
class SetSpillTempsBase : public SpillCliqueWalker
{
    unsigned m_baseTmp;

public:
    SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
    {
    }
    virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};

// This class is used for implementing impReimportSpillClique part on each block within the spill clique
class ReimportSpillClique : public SpillCliqueWalker
{
    Compiler* m_pComp;

public:
    ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
    {
    }
    virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};

// This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
// predecessor or successor within the spill clique
void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);

// For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
// incoming locals. This walks that list and resets the types of the GenTrees to match the types of
// the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
void impRetypeEntryStateTemps(BasicBlock* blk);

BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);

void impPushVar(GenTree* op, typeInfo tiRetVal);
GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
void impLoadVar(unsigned lclNum, IL_OFFSET offset)
{
    impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
}
void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
bool impReturnInstruction(int prefixFlags, OPCODE& opcode);

#ifdef TARGET_ARM
void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif

// A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode
{
    BasicBlock*    m_blk;
    BlockListNode* m_next;
    BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
    {
    }
    void* operator new(size_t sz, Compiler* comp);
};
BlockListNode* impBlockListNodeFreeList;

void FreeBlockListNode(BlockListNode* node);

bool impIsValueType(typeInfo* pTypeInfo);
var_types mangleVarArgsType(var_types type);

regNumber getCallArgIntRegister(regNumber floatReg);
regNumber getCallArgFloatRegister(regNumber intReg);

#if defined(DEBUG)
static unsigned jitTotalMethodCompiled;
#endif

#ifdef DEBUG
static LONG jitNestingLevel;
#endif // DEBUG

static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);

void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);

// STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. 
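// Illustrative sketch: a plain walk over the block list (the range-based
// Blocks() helpers declared later wrap the same traversal):
//
//   for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
//   {
//       // ... visit block ...
//   }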
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! 
if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? #endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. 
// - In FGOrderLinear, the dominant ordering is the linear order.

enum FlowGraphOrder
{
    FGOrderTree,
    FGOrderLinear
};
FlowGraphOrder fgOrder;

// The following are boolean flags that keep track of the state of internal data structures

bool fgStmtListThreaded;       // true if the node list is now threaded
bool fgCanRelocateEHRegions;   // true if we are allowed to relocate the EH regions
bool fgEdgeWeightsComputed;    // true after we have called fgComputeEdgeWeights
bool fgHaveValidEdgeWeights;   // true if we were successful in computing all of the edge weights
bool fgSlopUsedInEdgeWeights;  // true if there was some slop used when computing the edge weights
bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form
bool fgNeedsUpdateFlowGraph;   // true if we need to run fgUpdateFlowGraph
weight_t fgCalledCount;        // count of the number of times this method was called
                               // This is derived from the profile data
                               // or is BB_UNITY_WEIGHT when we don't have profile data

#if defined(FEATURE_EH_FUNCLETS)
bool fgFuncletsCreated; // true if the funclet creation phase has been run
#endif // FEATURE_EH_FUNCLETS

bool fgGlobalMorph; // indicates whether we are in the global morphing phase
                    // since fgMorphTree can be called from several places

bool     impBoxTempInUse; // the temp below is valid and available
unsigned impBoxTemp;      // a temporary that is used for boxing

#ifdef DEBUG
bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
                         // and we are trying to compile again in a "safer", minopts mode?
#endif

#if defined(DEBUG)
unsigned impInlinedCodeSize;
bool     fgPrintInlinedMethods;
#endif

jitstd::vector<flowList*>* fgPredListSortVector;

//-------------------------------------------------------------------------

void fgInit();

PhaseStatus fgImport();

PhaseStatus fgTransformIndirectCalls();

PhaseStatus fgTransformPatchpoints();

PhaseStatus fgInline();

PhaseStatus fgRemoveEmptyTry();

PhaseStatus fgRemoveEmptyFinally();

PhaseStatus fgMergeFinallyChains();

PhaseStatus fgCloneFinally();

void fgCleanupContinuation(BasicBlock* continuation);

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)

PhaseStatus fgUpdateFinallyTargetFlags();

void fgClearAllFinallyTargetBits();

void fgAddFinallyTargetFlags();

#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)

PhaseStatus fgTailMergeThrows();
void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
                                        BasicBlock* nonCanonicalBlock,
                                        BasicBlock* canonicalBlock,
                                        flowList*   predEdge);
void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
                                   BasicBlock* nonCanonicalBlock,
                                   BasicBlock* canonicalBlock,
                                   flowList*   predEdge);

GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
// Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
// when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering();

GenTree* fgInitThisClass();

GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);

GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);

bool backendRequiresLocalVarLifetimes()
{
    return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars();
}

void fgLocalVarLiveness();

void fgLocalVarLivenessInit();

void fgPerNodeLocalVarLiveness(GenTree* node);
void fgPerBlockLocalVarLiveness();

VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);

void fgLiveVarAnalysis(bool updateInternalOnly = false);

void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);

void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node);
bool fgComputeLifeTrackedLocalDef(VARSET_TP&           life,
                                  VARSET_VALARG_TP     keepAliveVars,
                                  LclVarDsc&           varDsc,
                                  GenTreeLclVarCommon* node);
bool fgComputeLifeUntrackedLocal(VARSET_TP&           life,
                                 VARSET_VALARG_TP     keepAliveVars,
                                 LclVarDsc&           varDsc,
                                 GenTreeLclVarCommon* lclVarNode);
bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode);

void fgComputeLife(VARSET_TP&       life,
                   GenTree*         startNode,
                   GenTree*         endNode,
                   VARSET_VALARG_TP volatileVars,
                   bool* pStmtInfoDirty DEBUGARG(bool* treeModf));

void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars);

bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange);

void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block);

bool fgRemoveDeadStore(GenTree**        pTree,
                       LclVarDsc*       varDsc,
                       VARSET_VALARG_TP life,
                       bool*            doAgain,
                       bool*            pStmtInfoDirty,
                       bool* pStoreRemoved DEBUGARG(bool* treeModf));

void fgInterBlockLocalVarLiveness();

// Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
// 1. for (BasicBlock* const block : compiler->Blocks()) ...
// 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
// 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
// In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
// both `startBlock` and `endBlock` must be non-null.
//
BasicBlockSimpleList Blocks() const
{
    return BasicBlockSimpleList(fgFirstBB);
}

BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
{
    return BasicBlockSimpleList(startBlock);
}

BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
{
    return BasicBlockRangeList(startBlock, endBlock);
}

// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
    if (m_opAsgnVarDefSsaNums == nullptr)
    {
        m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
    }
    return m_opAsgnVarDefSsaNums;
}

// This map tracks nodes whose value numbers explicitly or implicitly depend on memory states.
// The map provides the entry block of the most closely enclosing loop that
// defines the memory region accessed when defining the node's VN.
//
// This information should be consulted when considering hoisting node out of a loop, as the VN
// for the node will only be valid within the indicated loop.
//
// It is not fine-grained enough to track memory dependence within loops, so cannot be used
// for more general code motion.
//
// If a node does not have an entry in the map we currently assume the VN is not memory dependent
// and so memory does not constrain hoisting.
//
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap()
{
    if (m_nodeToLoopMemoryBlockMap == nullptr)
    {
        m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator());
    }
    return m_nodeToLoopMemoryBlockMap;
}

void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN);
void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree);

// Requires value numbering phase to have completed. Returns the value number ("gtVN") of the
// "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the
// "use" VN. Performs a lookup into the map of (use asg tree -> def VN) to return the "def's"
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree);

// Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl);

inline bool PreciseRefCountsRequired();

// Performs SSA conversion.
void fgSsaBuild();

// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
void fgResetForSsa();

unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.

// Returns "true" if this is a special variable that is never zero initialized in the prolog.
inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum);

// Returns "true" if the variable needs explicit zero initialization.
inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn);

// The value numbers for this compilation.
ValueNumStore* vnStore;

public:
ValueNumStore* GetValueNumStore()
{
    return vnStore;
}

// Do value numbering (assign a value number to each
// tree node).
void fgValueNumber();

// Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
                                     ValueNum             arrVN,
                                     ValueNum             inxVN,
                                     FieldSeqNode*        fldSeq,
                                     ValueNum             rhsVN,
                                     var_types            indType);

// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
// can raise exceptions, those should be captured in the exception set "addrXvnp".
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.) Also marks the tree's argument as the address of an array element.
// The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
ValueNum fgValueNumberArrIndexVal(GenTree*             tree,
                                  CORINFO_CLASS_HANDLE elemTypeEq,
                                  ValueNum             arrVN,
                                  ValueNum             inxVN,
                                  ValueNumPair         addrXvnp,
                                  FieldSeqNode*        fldSeq);

// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp);

// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);

unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.

// Utility functions for fgValueNumber.

// Perform value-numbering for the trees in "blk".
void fgValueNumberBlock(BasicBlock* blk);

// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
// assumed for the memoryKind at the start of "entryBlock".
ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);

// Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
// As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg));

// Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
// mutated.
void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg));

// For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
// As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));

// For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's
// MemorySsaMap.
void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg));

void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN);

// Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree);

// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
void fgValueNumberTreeConst(GenTree* tree);

// If the VN store has been initialized, reassign the
// proper value number to the constant tree.
void fgUpdateConstTreeValueNumber(GenTree* tree);

// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
void fgValueNumberTree(GenTree* tree);

void fgValueNumberAssignment(GenTreeOp* tree);

// Does value-numbering for a block assignment.
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing an overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value numbers for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemTyp" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
return elemStructType; } else { assert(elemTyp != TYP_STRUCT); elemTyp = varTypeToSigned(elemTyp); return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1); } } // If "clsHnd" encodes a primitive type (i.e., its low-order bit is set), returns the // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is // the struct type of the element). static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd) { size_t clsHndVal = size_t(clsHnd); if (clsHndVal & 0x1) { return var_types(clsHndVal >> 1); } else { return TYP_STRUCT; } } // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types var_types getJitGCType(BYTE gcType); // Returns true if the provided type should be treated as a primitive type // for the unmanaged calling conventions. bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd); enum structPassingKind { SPK_Unknown, // Invalid value, never returned SPK_PrimitiveType, // The struct is passed/returned using a primitive type. SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that // require a primitive type temp that is larger than the struct size. // Currently used for structs of size 3, 5, 6, or 7 bytes. SPK_ByValue, // The struct is passed/returned by value (using the ABI rules) // for ARM64 and UNIX_X64 in multiple registers. (when all of the // parameters registers are used, then the stack will be used) // for X86 passed on the stack, for ARM32 passed in registers // or the stack or split between registers and the stack. SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers. SPK_ByReference }; // The struct is passed/returned by reference to a copy/buffer. // Get the "primitive" type that is used when we are given a struct of size 'structSize'. // For pointer sized structs the 'clsHnd' is used to determine if the struct contains a GC ref. // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg); // Get the type that is used to pass values of the given struct type. // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize); // Get the type that is used to return values of the given struct type. // If the size is unknown, pass 0 and it will be determined from 'clsHnd'. var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbPassStruct = nullptr, unsigned structSize = 0); #ifdef DEBUG // Print a representation of "vnp" or "vn" on standard output. // If "level" is non-zero, we also print out a partial expansion of the value. void vnpPrint(ValueNumPair vnp, unsigned level); void vnPrint(ValueNum vn, unsigned level); #endif bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2 // Dominator computation member functions // Not exposed outside Compiler protected: bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2 // Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers.
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially if dealing with a single block versus block check. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological order; this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree, represented // using adjacency lists. // In order to speed up the queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder // traversal of the dominance tree and the dominance query will become A dominates B iff preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B) making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessors lists. This is used in some early phases // before the full predecessors lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets of // blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets of // blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for trees with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags are // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts;
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, which is a private field.
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking the previous SIMD field assignment // in the function Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting of 'tree' into the pre-header for loop 'lnum'. void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum', mark it and any nested loops as having 'memoryHavoc'. void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
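// Example (an illustrative sketch, not code in this header): the keys stored in
// lpArrayElemTypesModified are produced by EncodeElemType (see above), which tags a
// primitive element type in the low bit of a CORINFO_CLASS_HANDLE so that
// DecodeElemType can recover it later, e.g.:
//
//   CORINFO_CLASS_HANDLE h = EncodeElemType(TYP_INT, nullptr); // (size_t(TYP_INT) << 1) | 0x1
//   assert(DecodeElemType(h) == TYP_INT);                      // low bit set => primitive
//   // For a genuine struct handle (low bit clear), DecodeElemType returns TYP_STRUCT.
//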
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. those that have the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The head block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNums are properly ordered, so we can't do a simple lpContains() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method.
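// Example (an illustrative sketch, assuming a valid loop table): the immediate children
// of a loop can be enumerated by following the lpChild/lpSibling links described in
// LoopDsc above, e.g.:
//
//   for (unsigned child = optLoopTable[lnum].lpChild;
//        child != BasicBlock::NOT_IN_LOOP;
//        child = optLoopTable[child].lpSibling)
//   {
//       // visit optLoopTable[child], an immediate child loop of "lnum"
//   }
//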
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
    // (0 == top level)
    unsigned optLoopDepth(unsigned lnum)
    {
        assert(lnum < optLoopCount);
        unsigned depth = 0;
        while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
        {
            ++depth;
        }
        return depth;
    }

    // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
    struct OptInvertCountTreeInfoType
    {
        int sharedStaticHelperCount;
        int arrayLengthCount;
    };

    static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);

    bool optInvertWhileLoop(BasicBlock* block);

private:
    static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
    static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);

    bool optComputeLoopRep(int        constInit,
                           int        constLimit,
                           int        iterInc,
                           genTreeOps iterOper,
                           var_types  iterType,
                           genTreeOps testOper,
                           bool       unsignedTest,
                           bool       dupCond,
                           unsigned*  iterCount);

    static fgWalkPreFn optIsVarAssgCB;

protected:
    bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);

    bool optIsVarAssgLoop(unsigned lnum, unsigned var);

    int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);

    bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);

protected:
    // The following is the upper limit on how many expressions we'll keep track
    // of for the CSE analysis.
    //
    static const unsigned MAX_CSE_CNT = EXPSET_SZ;

    static const int MIN_CSE_COST = 2;

    // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
    // This BitVec uses one bit per CSE candidate
    BitVecTraits* cseMaskTraits; // one bit per CSE candidate

    // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
    // Two bits are allocated per CSE candidate to compute CSE availability
    // plus an extra bit to handle the initial unvisited case.
    // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
    //
    // The two bits per CSE candidate have the following meanings:
    //     11 - The CSE is available, and is also available when considering calls as killing availability.
    //     10 - The CSE is available, but is not available when considering calls as killing availability.
    //     00 - The CSE is not available
    //     01 - An illegal combination
    //
    BitVecTraits* cseLivenessTraits;

    //-----------------------------------------------------------------------------------------------------------------
    // genCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
    // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
    // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
    // GET_CSE_INDEX().
    //
    static unsigned genCSEnum2bit(unsigned CSEnum)
    {
        assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
        return CSEnum - 1;
    }

    //-----------------------------------------------------------------------------------------------------------------
    // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
    //
    static unsigned getCSEAvailBit(unsigned CSEnum)
    {
        return genCSEnum2bit(CSEnum) * 2;
    }

    //-----------------------------------------------------------------------------------------------------------------
    // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
    // for a CSE considering calls as killing availability bit (see description above).
    //
    static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
    {
        return getCSEAvailBit(CSEnum) + 1;
    }
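    // For example: for candidate CSE #3, genCSEnum2bit(3) == 2, so getCSEAvailBit(3) == 4 and
    // getCSEAvailCrossCallBit(3) == 5; bits 4 and 5 of a dataflow set (bbCseGen, etc.) together
    // hold that candidate's two-bit availability state described above.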
    void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);

    EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites

    /* Generic list of nodes - used by the CSE logic */

    struct treeLst
    {
        treeLst* tlNext;
        GenTree* tlTree;
    };

    struct treeStmtLst
    {
        treeStmtLst* tslNext;
        GenTree*     tslTree;  // tree node
        Statement*   tslStmt;  // statement containing the tree
        BasicBlock*  tslBlock; // block containing the statement
    };

    // The following logic keeps track of expressions via a simple hash table.

    struct CSEdsc
    {
        CSEdsc* csdNextInBucket; // used by the hash table

        size_t   csdHashKey;      // the original hashkey
        ssize_t  csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
        ValueNum csdConstDefVN;   // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
                                  // assignment
        unsigned csdIndex;        // 1..optCSECandidateCount
        bool     csdIsSharedConst; // true if this CSE is a shared const
        bool     csdLiveAcrossCall;

        unsigned short csdDefCount; // definition count
        unsigned short csdUseCount; // use count (excluding the implicit uses at defs)

        weight_t csdDefWtCnt; // weighted def count
        weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)

        GenTree*    csdTree;  // treenode containing the 1st occurrence
        Statement*  csdStmt;  // stmt containing the 1st occurrence
        BasicBlock* csdBlock; // block containing the 1st occurrence

        treeStmtLst* csdTreeList; // list of matching tree nodes: head
        treeStmtLst* csdTreeLast; // list of matching tree nodes: tail

        // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have valid struct handle.
        //
        CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
        bool                 csdStructHndMismatch;

        ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
                                   // This will be set to NoVN if we decide to abandon this CSE

        ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.

        ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
                                   // number, this will reflect it; otherwise, NoVN.
                                   // not used for shared const CSEs
    };

    static const size_t s_optCSEhashSizeInitial;
    static const size_t s_optCSEhashGrowthFactor;
    static const size_t s_optCSEhashBucketSize;
    size_t              optCSEhashSize;                 // The current size of the hashtable
    size_t              optCSEhashCount;                // Number of entries in the hashtable
    size_t              optCSEhashMaxCountBeforeResize; // Number of entries before resize
    CSEdsc**            optCSEhash;
    CSEdsc**            optCSEtab;

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;

    NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
                                          // re-numbered with the bound to improve range check elimination
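    // For example (illustrative): given "i < a.Length" where "a.Length" is a CSE candidate, the map
    // records the bound node (the array length) against the ancestor compare, so that when the bound
    // is replaced by a CSE temp the compare can be re-numbered and range check elimination still
    // recognizes it.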
    // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
    void optCseUpdateCheckedBoundMap(GenTree* compare);

    void optCSEstop();

    CSEdsc* optCSEfindDsc(unsigned index);
    bool optUnmarkCSE(GenTree* tree);

    // user defined callback data for the tree walk function optCSE_MaskHelper()
    struct optCSE_MaskData
    {
        EXPSET_TP CSE_defMask;
        EXPSET_TP CSE_useMask;
    };

    // Treewalk helper for optCSE_DefMask and optCSE_UseMask
    static fgWalkPreFn optCSE_MaskHelper;

    // This function walks all the nodes of a given tree
    // and returns the mask of CSE definitions and uses for the tree
    //
    void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);

    // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
    bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);

    struct optCSEcostCmpEx
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };
    struct optCSEcostCmpSz
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };

    void optCleanupCSEs();

#ifdef DEBUG
    void optEnsureClearCSEInfo();
#endif // DEBUG

    static bool Is_Shared_Const_CSE(size_t key)
    {
        return ((key & TARGET_SIGN_BIT) != 0);
    }

    // returns the encoded key
    static size_t Encode_Shared_Const_CSE_Value(size_t key)
    {
        return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
    }

    // returns the original key
    // (note: encoding discards the low CSE_CONST_SHARED_LOW_BITS bits, so decoding yields the
    // original key with those low bits cleared -- nearby constants share the same encoded key)
    static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
    {
        assert(Is_Shared_Const_CSE(enckey));
        return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
    }

    /**************************************************************************
     *                   Value Number based CSEs
     *************************************************************************/

    // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"

public:
    void optOptimizeValnumCSEs();

protected:
    void     optValnumCSE_Init();
    unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
    bool     optValnumCSE_Locate();
    void     optValnumCSE_InitDataFlow();
    void     optValnumCSE_DataFlow();
    void     optValnumCSE_Availablity();
    void     optValnumCSE_Heuristic();

    bool     optDoCSE;             // True when we have found a duplicate CSE tree
    bool     optValnumCSE_phase;   // True when we are executing the optOptimizeValnumCSEs() phase
    unsigned optCSECandidateCount; // Count of CSE candidates
    unsigned optCSEstart;          // The first local variable number that is a CSE
    unsigned optCSEcount;          // The total count of CSEs introduced.
    weight_t optCSEweight;         // The weight of the current block when we are doing PerformCSE

    bool optIsCSEcandidate(GenTree* tree);

    // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
    //
    bool lclNumIsTrueCSE(unsigned lclNum) const
    {
        return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
    }

    // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
    //
    bool lclNumIsCSE(unsigned lclNum) const
    {
        return lvaGetDesc(lclNum)->lvIsCSE;
    }

#ifdef DEBUG
    bool optConfigDisableCSE();
    bool optConfigDisableCSE2();
#endif

    void optOptimizeCSEs();

    struct isVarAssgDsc
    {
        GenTree*     ivaSkip;
        ALLVARSET_TP ivaMaskVal; // Set of variables assigned to.  This is a set of all vars, not tracked vars.
#ifdef DEBUG
        void* ivaSelf;
#endif
        unsigned    ivaVar;            // Variable we are interested in, or -1
        varRefKinds ivaMaskInd;        // What kind of indirect assignments are there?
        callInterf  ivaMaskCall;       // What kind of calls are there?
        bool        ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
    };
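    // Usage sketch (illustrative): optIsVarAssigned and optIsSetAssgLoop fill in an isVarAssgDsc and
    // walk the relevant blocks with the optIsVarAssgCB callback; after the walk, ivaMaskVal holds the
    // set of assigned variables and ivaMaskIncomplete reports any assignments that could not be
    // represented in that set.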
    static callInterf optCallInterf(GenTreeCall* call);

public:
    // VN based copy propagation.

    // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for.
    // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor,
    // for locals which will use "definitions from uses", it will not be, so we store it
    // in this class instead.
    class CopyPropSsaDef
    {
        LclSsaVarDsc* m_ssaDef;
#ifdef DEBUG
        GenTree* m_defNode;
#endif
    public:
        CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode)
            : m_ssaDef(ssaDef)
#ifdef DEBUG
            , m_defNode(defNode)
#endif
        {
        }

        LclSsaVarDsc* GetSsaDef() const
        {
            return m_ssaDef;
        }

#ifdef DEBUG
        GenTree* GetDefNode() const
        {
            return m_defNode;
        }
#endif
    };

    typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack;
    typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap;

    // Copy propagation functions.
    void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName);
    void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
    void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
    void optCopyPropPushDef(GenTreeOp*           asg,
                            GenTreeLclVarCommon* lclNode,
                            unsigned             lclNum,
                            LclNumToLiveDefsMap* curSsaName);
    unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode);
    int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2);
    void optVnCopyProp();
    INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName));

    /**************************************************************************
     *               Early value propagation
     *************************************************************************/
    struct SSAName
    {
        unsigned m_lvNum;
        unsigned m_ssaNum;

        SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
        {
        }

        static unsigned GetHashCode(SSAName ssaNm)
        {
            return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum);
        }

        static bool Equals(SSAName ssaNm1, SSAName ssaNm2)
        {
            return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum);
        }
    };

#define OMF_HAS_NEWARRAY 0x00000001         // Method contains 'new' of an array
#define OMF_HAS_NEWOBJ 0x00000002           // Method contains 'new' of an object type.
#define OMF_HAS_ARRAYREF 0x00000004         // Method contains array element loads or stores.
#define OMF_HAS_NULLCHECK 0x00000008        // Method contains a null check.
#define OMF_HAS_FATPOINTER 0x00000010       // Method contains a call that needs fat pointer transformation.
#define OMF_HAS_OBJSTACKALLOC 0x00000020    // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000040    // Method contains a guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary.
#define OMF_HAS_PATCHPOINT 0x00000100       // Method contains patchpoints
#define OMF_NEEDS_GCPOLLS 0x00000200        // Method needs GC polls
#define OMF_HAS_FROZEN_STRING 0x00000400    // Method has a frozen string (REF constant int), currently only on CoreRT.
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
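        // For example (illustrative): a loop exit test such as "i < 100" can give rise to an
        // OAK_EQUAL or OAK_NOT_EQUAL assertion whose op1.kind is O1K_CONSTANT_LOOP_BND (or
        // O1K_CONSTANT_LOOP_BND_UN for unsigned compares), which the predicates above and below
        // classify.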
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
    CallSiteDebugInfoTable* genCallSite2DebugInfoMap;

    unsigned    genReturnLocal; // Local number for the return value when applicable.
    BasicBlock* genReturnBB;    // jumped to when not optimizing for speed.

    // The following properties are part of CodeGenContext.  Getters are provided here for
    // convenience and backward compatibility, but the properties can only be set by invoking
    // the setter on CodeGenContext directly.

    emitter* GetEmitter() const
    {
        return codeGen->GetEmitter();
    }

    bool isFramePointerUsed() const
    {
        return codeGen->isFramePointerUsed();
    }

    bool GetInterruptible()
    {
        return codeGen->GetInterruptible();
    }
    void SetInterruptible(bool value)
    {
        codeGen->SetInterruptible(value);
    }

#if DOUBLE_ALIGN
    const bool genDoubleAlign()
    {
        return codeGen->doDoubleAlign();
    }
    DWORD getCanDoubleAlign();
    bool shouldDoubleAlign(unsigned refCntStk,
                           unsigned refCntReg,
                           weight_t refCntWtdReg,
                           unsigned refCntStkParam,
                           weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN

    bool IsFullPtrRegMapRequired()
    {
        return codeGen->IsFullPtrRegMapRequired();
    }
    void SetFullPtrRegMapRequired(bool value)
    {
        codeGen->SetFullPtrRegMapRequired(value);
    }

    // Things that MAY belong either in CodeGen or CodeGenContext

#if defined(FEATURE_EH_FUNCLETS)
    FuncInfoDsc*   compFuncInfos;
    unsigned short compCurrFuncIdx;
    unsigned short compFuncInfoCount;

    unsigned short compFuncCount()
    {
        assert(fgFuncletsCreated);
        return compFuncInfoCount;
    }

#else // !FEATURE_EH_FUNCLETS

    // This is a no-op when there are no funclets!
    void genUpdateCurrentFunclet(BasicBlock* block)
    {
        return;
    }

    FuncInfoDsc compFuncInfoRoot;
    static const unsigned compCurrFuncIdx = 0;

    unsigned short compFuncCount()
    {
        return 1;
    }

#endif // !FEATURE_EH_FUNCLETS

    FuncInfoDsc* funCurrentFunc();
    void funSetCurrentFunc(unsigned funcIdx);
    FuncInfoDsc* funGetFunc(unsigned funcIdx);
    unsigned int funGetFuncIdx(BasicBlock* block);

    // LIVENESS

    VARSET_TP compCurLife;     // current live variables
    GenTree*  compCurLifeTree; // node after which compCurLife has been computed

    // Compare the given "newLife" with the last set of live variables and update
    // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness.
    template <bool ForCodeGen>
    void compChangeLife(VARSET_VALARG_TP newLife);

    // Update the GC's masks, register's masks and reports change on variable's homes given a set of
    // current live variables if changes have happened since "compCurLife".
    template <bool ForCodeGen>
    inline void compUpdateLife(VARSET_VALARG_TP newLife);

    // Gets a register mask that represents the kill set for a helper call since
    // not all JIT Helper calls follow the standard ABI on the target architecture.
    regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);

#ifdef TARGET_ARM
    // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
    // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
    // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
    // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int
    // and a double, and we started at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to
    // the mask.
    void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
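    // Worked example (illustrative): for struct { int i; double d; } passed starting at R0, "i" goes
    // in R0, the double must be 8-byte aligned so "d" goes in R2:R3, and the hole at R1 causes RBM_R1
    // to be OR'ed into *pArgSkippedRegMask.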
#endif // TARGET_ARM

    // If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that
    // LCL_VAR node, else NULL.
    static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);

    // This map is indexed by GT_OBJ nodes that are addresses of promoted struct variables, which
    // have been annotated with the GTF_VAR_DEATH flag.  If such a node is *not* mapped in this
    // table, one may assume that all the (tracked) field vars die at this GT_OBJ.  Otherwise,
    // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
    // vars of the promoted struct local that go dead at the given node (the set bits are the bits
    // for the tracked var indices of the field vars, as in a live var set).
    //
    // The map is allocated on demand so all map operations should use one of the following three
    // wrapper methods.

    NodeToVarsetPtrMap* m_promotedStructDeathVars;

    NodeToVarsetPtrMap* GetPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars == nullptr)
        {
            m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
        }
        return m_promotedStructDeathVars;
    }

    void ClearPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars != nullptr)
        {
            m_promotedStructDeathVars->RemoveAll();
        }
    }

    bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
    {
        *bits       = nullptr;
        bool result = false;

        if (m_promotedStructDeathVars != nullptr)
        {
            result = m_promotedStructDeathVars->Lookup(tree, bits);
        }

        return result;
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           UnwindInfo                                      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#if !defined(__GNUC__)
#pragma region Unwind information
#endif

public:
    //
    // Infrastructure functions: start/stop/reserve/emit.
    //

    void unwindBegProlog();
    void unwindEndProlog();
    void unwindBegEpilog();
    void unwindEndEpilog();
    void unwindReserve();
    void unwindEmit(void* pHotCode, void* pColdCode);

    //
    // Specific unwind information functions: called by code generation to indicate a particular
    // prolog or epilog unwindable instruction has been generated.
    //

    void unwindPush(regNumber reg);
    void unwindAllocStack(unsigned size);
    void unwindSetFrameReg(regNumber reg, unsigned offset);
    void unwindSaveReg(regNumber reg, unsigned offset);

#if defined(TARGET_ARM)
    void unwindPushMaskInt(regMaskTP mask);
    void unwindPushMaskFloat(regMaskTP mask);
    void unwindPopMaskInt(regMaskTP mask);
    void unwindPopMaskFloat(regMaskTP mask);
    void unwindBranch16();                    // The epilog terminates with a 16-bit branch (e.g., "bx lr")
    void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
                                              // called via unwindPadding().
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
#endif // TARGET_ARM

#if defined(TARGET_ARM64)
    void unwindNop();
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
    void unwindSaveReg(regNumber reg, int offset);           // str reg, [sp, #offset]
    void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Should we support SIMD intrinsics? bool featureSIMD; // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
            return NO_CLASS_HANDLE;
        }

        if (simdBaseJitType == CORINFO_TYPE_FLOAT)
        {
            switch (simdType)
            {
                case TYP_SIMD8:
                    return m_simdHandleCache->SIMDVector2Handle;
                case TYP_SIMD12:
                    return m_simdHandleCache->SIMDVector3Handle;
                case TYP_SIMD16:
                    if ((getSIMDVectorType() == TYP_SIMD32) ||
                        (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
                    {
                        return m_simdHandleCache->SIMDVector4Handle;
                    }
                    break;
                case TYP_SIMD32:
                    break;
                default:
                    unreached();
            }
        }

        assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
        switch (simdBaseJitType)
        {
            case CORINFO_TYPE_FLOAT:
                return m_simdHandleCache->SIMDFloatHandle;
            case CORINFO_TYPE_DOUBLE:
                return m_simdHandleCache->SIMDDoubleHandle;
            case CORINFO_TYPE_INT:
                return m_simdHandleCache->SIMDIntHandle;
            case CORINFO_TYPE_USHORT:
                return m_simdHandleCache->SIMDUShortHandle;
            case CORINFO_TYPE_UBYTE:
                return m_simdHandleCache->SIMDUByteHandle;
            case CORINFO_TYPE_SHORT:
                return m_simdHandleCache->SIMDShortHandle;
            case CORINFO_TYPE_BYTE:
                return m_simdHandleCache->SIMDByteHandle;
            case CORINFO_TYPE_LONG:
                return m_simdHandleCache->SIMDLongHandle;
            case CORINFO_TYPE_UINT:
                return m_simdHandleCache->SIMDUIntHandle;
            case CORINFO_TYPE_ULONG:
                return m_simdHandleCache->SIMDULongHandle;
            case CORINFO_TYPE_NATIVEINT:
                return m_simdHandleCache->SIMDNIntHandle;
            case CORINFO_TYPE_NATIVEUINT:
                return m_simdHandleCache->SIMDNUIntHandle;
            default:
                assert(!"Didn't find a class handle for simdType");
        }

        return NO_CLASS_HANDLE;
    }

    // Returns true if this is a SIMD type that should be considered an opaque
    // vector type (i.e. do not analyze or promote its fields).
    // Note that all but the fixed vector types are opaque, even though they may
    // actually be declared as having fields.
    bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
    {
        return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector3Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector4Handle));
    }

    // Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
    // the type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
    bool isSIMDTypeLocal(GenTree* tree)
    {
        return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
    }

    // Returns true if the lclVar is an opaque SIMD type.
    bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
    {
        if (!varDsc->lvSIMDType)
        {
            return false;
        }
        return isOpaqueSIMDType(varDsc->GetStructHnd());
    }

    static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
    {
        return (intrinsicId == SIMDIntrinsicEqual);
    }

    // Returns base JIT type of a TYP_SIMD local.
    // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
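    // For example (illustrative only): for a TYP_SIMD16 local imported from a
    // System.Numerics.Vector4 this returns CORINFO_TYPE_FLOAT, while for a plain
    // TYP_INT local it returns CORINFO_TYPE_UNDEF.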
    CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
    {
        if (isSIMDTypeLocal(tree))
        {
            return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType();
        }
        return CORINFO_TYPE_UNDEF;
    }

    bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
    {
        if (isIntrinsicType(clsHnd))
        {
            const char* namespaceName = nullptr;
            (void)getClassNameFromMetadata(clsHnd, &namespaceName);
            return strcmp(namespaceName, "System.Numerics") == 0;
        }
        return false;
    }

    bool isSIMDClass(typeInfo* pTypeInfo)
    {
        return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
    }

    bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
    {
#ifdef FEATURE_HW_INTRINSICS
        if (isIntrinsicType(clsHnd))
        {
            const char* namespaceName = nullptr;
            (void)getClassNameFromMetadata(clsHnd, &namespaceName);
            return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0;
        }
#endif // FEATURE_HW_INTRINSICS
        return false;
    }

    bool isHWSIMDClass(typeInfo* pTypeInfo)
    {
#ifdef FEATURE_HW_INTRINSICS
        return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass());
#else
        return false;
#endif
    }

    bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
    {
        return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd);
    }

    bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo)
    {
        return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
    }

    // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
    // if it is not a SIMD type or is an unsupported base JIT type.
    CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);

    CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
    {
        return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
    }

    // Get SIMD Intrinsic info given the method handle.
    // Also sets typeHnd, argCount, baseType and sizeBytes out params.
    const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
                                                  CORINFO_METHOD_HANDLE methodHnd,
                                                  CORINFO_SIG_INFO*     sig,
                                                  bool                  isNewObj,
                                                  unsigned*             argCount,
                                                  CorInfoType*          simdBaseJitType,
                                                  unsigned*             sizeBytes);

    // Pops and returns a GenTree node from the importer's type stack.
    // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes.
    GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);

    // Transforms operands and returns the SIMD intrinsic to be applied on
    // transformed operands to obtain the given relop result.
    SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID      relOpIntrinsicId,
                                 CORINFO_CLASS_HANDLE typeHnd,
                                 unsigned             simdVectorSize,
                                 CorInfoType*         inOutBaseJitType,
                                 GenTree**            op1,
                                 GenTree**            op2);

#if defined(TARGET_XARCH)
    // Transforms operands and returns the SIMD intrinsic to be applied on
    // transformed operands to obtain == comparison result.
    SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
                                          unsigned             simdVectorSize,
                                          GenTree**            op1,
                                          GenTree**            op2);
#endif // defined(TARGET_XARCH)

    void setLclRelatedToSIMDIntrinsic(GenTree* tree);
    bool areFieldsContiguous(GenTree* op1, GenTree* op2);
    bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second);
    bool areArrayElementsContiguous(GenTree* op1, GenTree* op2);
    bool areArgumentsContiguous(GenTree* op1, GenTree* op2);
    GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize);

    // Check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
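    // For example (illustrative only): a call to System.Numerics.Vector4.op_Addition can be
    // recognized here and expanded into a SIMD intrinsic node instead of remaining an
    // ordinary method call.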
    GenTree* impSIMDIntrinsic(OPCODE                opcode,
                              GenTree*              newobjThis,
                              CORINFO_CLASS_HANDLE  clsHnd,
                              CORINFO_METHOD_HANDLE method,
                              CORINFO_SIG_INFO*     sig,
                              unsigned              methodFlags,
                              int                   memberRef);

    GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);

    // Whether SIMD vector occupies part of SIMD register.
    // SSE2: vector2f/3f are considered sub register SIMD types.
    // AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
    bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
    {
        unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
        // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
        // with the AOT compiler, so that it cannot change from aot compilation time to runtime.
        // This api does not require such fixing as it merely pertains to the size of the simd type
        // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
        // does not preclude the code from being used on a machine with a larger vector length.)
        if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
        {
            vectorRegisterByteLength = 16;
        }
        else
        {
            vectorRegisterByteLength = 32;
        }
#else
        vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
        return (simdNode->GetSimdSize() < vectorRegisterByteLength);
    }

    // Get the type for the hardware SIMD vector.
    // This is the maximum SIMD type supported for this target.
    var_types getSIMDVectorType()
    {
#if defined(TARGET_XARCH)
        if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
        {
            return TYP_SIMD32;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return TYP_SIMD16;
        }
#elif defined(TARGET_ARM64)
        return TYP_SIMD16;
#else
        assert(!"getSIMDVectorType() unimplemented on target arch");
        unreached();
#endif
    }

    // Get the size of the SIMD type in bytes
    int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
    {
        unsigned sizeBytes = 0;
        (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
        return sizeBytes;
    }

    // Get the number of elements of baseType in a SIMD vector given by its size and baseType
    static int getSIMDVectorLength(unsigned simdSize, var_types baseType);

    // Get the number of elements of baseType in a SIMD vector given by its type handle
    int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);

    // Get preferred alignment of SIMD type.
    int getSIMDTypeAlignment(var_types simdType);

    // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation.
    // Note - cannot be used for System.Runtime.Intrinsic
    unsigned getSIMDVectorRegisterByteLength()
    {
#if defined(TARGET_XARCH)
        if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#elif defined(TARGET_ARM64)
        return FP_REGSIZE_BYTES;
#else
        assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
        unreached();
#endif
    }

    // The minimum and maximum possible number of bytes in a SIMD vector.
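    // For example (illustrative numbers, following the register sizes used above): on x64
    // with AVX available, maxSIMDStructBytes() below returns YMM_REGSIZE_BYTES (32); with
    // only SSE2 it returns XMM_REGSIZE_BYTES (16). minSIMDStructBytes() is
    // emitTypeSize(TYP_SIMD8), i.e. 8 bytes.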
    // maxSIMDStructBytes
    // The maximum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic
    // SSE:  16-byte Vector<T> and Vector128<T>
    // AVX:  32-byte Vector256<T> (Vector<T> is 16-byte)
    // AVX2: 32-byte Vector<T> and Vector256<T>
    unsigned int maxSIMDStructBytes()
    {
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (compOpportunisticallyDependsOn(InstructionSet_AVX))
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#else
        return getSIMDVectorRegisterByteLength();
#endif
    }

    unsigned int minSIMDStructBytes()
    {
        return emitTypeSize(TYP_SIMD8);
    }

public:
    // Returns the codegen type for a given SIMD size.
    static var_types getSIMDTypeForSize(unsigned size)
    {
        var_types simdType = TYP_UNDEF;
        if (size == 8)
        {
            simdType = TYP_SIMD8;
        }
        else if (size == 12)
        {
            simdType = TYP_SIMD12;
        }
        else if (size == 16)
        {
            simdType = TYP_SIMD16;
        }
        else if (size == 32)
        {
            simdType = TYP_SIMD32;
        }
        else
        {
            noway_assert(!"Unexpected size for SIMD type");
        }
        return simdType;
    }

private:
    unsigned getSIMDInitTempVarNum(var_types simdType);

#else  // !FEATURE_SIMD
    bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
    {
        return false;
    }
#endif // FEATURE_SIMD

public:
    //------------------------------------------------------------------------
    // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered.
    //
    // Notes: It is not guaranteed that a struct of this size or smaller WILL be a
    //        candidate for enregistration.

    unsigned largestEnregisterableStructSize()
    {
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (opts.IsReadyToRun())
        {
            // Return a constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs
            // checks that are affected by the current level of instruction set support and
            // would otherwise cause the highest level of instruction set support to be
            // reported to crossgen2. This api is only ever used as an optimization or assert,
            // so no reporting should ever happen.
            return YMM_REGSIZE_BYTES;
        }
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        unsigned vectorRegSize = maxSIMDStructBytes();
        assert(vectorRegSize >= TARGET_POINTER_SIZE);
        return vectorRegSize;
#else  // !FEATURE_SIMD
        return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
    }

    // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
    // structs will fit the criteria.
    bool structSizeMightRepresentSIMDType(size_t structSize)
    {
#ifdef FEATURE_SIMD
        // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
        // about the size of a struct under the assumption that the struct size needs to be recorded.
        // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is
        // enregistered or not will not be messaged to the R2R compiler.
        return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
        return false;
#endif // FEATURE_SIMD
    }

#ifdef FEATURE_SIMD
    static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS

private:
    // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
    // is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately. The use
    // of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.

    // Is this var of SIMD struct type?
    bool lclVarIsSIMDType(unsigned varNum)
    {
        return lvaGetDesc(varNum)->lvIsSIMDType();
    }

    // Is this Local node a SIMD local?
    bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
    {
        return lclVarIsSIMDType(lclVarTree->GetLclNum());
    }

    // Returns true if the TYP_SIMD locals on stack are aligned at their
    // preferred byte boundary specified by getSIMDTypeAlignment().
    //
    // As per the Intel manual, the preferred alignment for AVX vectors is
    // 32-bytes. It is not clear whether additional stack space used in
    // aligning stack is worth the benefit and for now will use 16-byte
    // alignment for AVX 256-bit vectors with unaligned load/stores to/from
    // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
    // existing support for double (8-byte) alignment to 16 or 32 byte
    // alignment for frames with local SIMD vars, if that is determined to be
    // profitable.
    //
    // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
    // prolog has run). This means that in RBP-based frames RBP will be 16-byte
    // aligned. For RSP-based frames these are only sometimes aligned, depending
    // on the frame size.
    //
    bool isSIMDTypeLocalAligned(unsigned varNum)
    {
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
        if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
        {
            // TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
            int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
            if (alignment <= STACK_ALIGN)
            {
                bool rbpBased;
                int  off = lvaFrameAddress(varNum, &rbpBased);
                // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
                // first instruction of a function. If our frame is RBP based
                // then RBP will always be 16 bytes aligned, so we can simply
                // check the offset.
                if (rbpBased)
                {
                    return (off % alignment) == 0;
                }

                // For RSP-based frame the alignment of RSP depends on our
                // locals. rsp+8 is aligned on entry and we just subtract frame
                // size so it is not hard to compute. Note that the compiler
                // tries hard to make sure the frame size means RSP will be
                // 16-byte aligned, but for leaf functions without locals (i.e.
                // frameSize = 0) it will not be.
                int frameSize = codeGen->genTotalFrameSize();
                return ((8 - frameSize + off) % alignment) == 0;
            }
        }
#endif // FEATURE_SIMD

        return false;
    }

#ifdef DEBUG
    // Answer the question: Is a particular ISA supported?
    // Use this api when asking the question so that future
    // ISA questions can be asked correctly or when asserting
    // support/nonsupport for an instruction set
    bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
        return false;
#endif
    }
#endif // DEBUG

    bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will exactly match the target machine
    // on which the function is executed (except for CoreLib, where there are special rules)
    bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        uint64_t isaBit = (1ULL << isa);
        if ((opts.compSupportsISAReported & isaBit) == 0)
        {
            if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
                ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
            ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
        }
        return (opts.compSupportsISAExactly & isaBit) != 0;
#else
        return false;
#endif
    }

    // Ensure that code will not execute if an instruction set is usable. Call only
    // if the instruction set has previously been reported as unusable, but when
    // that status has not yet been recorded to the AOT compiler.
    void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
    {
        // use compExactlyDependsOn to capture and record the use of the isa
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the ISA is unusable. If true, this function should never be called.
        assert(!isaUsable);
    }

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true.
    // If the result is false, then the target machine may have support for the instruction.
    bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
    {
        if ((opts.compSupportsISA & (1ULL << isa)) != 0)
        {
            return compExactlyDependsOn(isa);
        }
        else
        {
            return false;
        }
    }

    // Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
    bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
    {
        // Report intent to use the ISA to the EE
        compExactlyDependsOn(isa);
        return ((opts.compSupportsISA & (1ULL << isa)) != 0);
    }

    bool canUseVexEncoding() const
    {
#ifdef TARGET_XARCH
        return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
        return false;
#endif
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Compiler                                        XX
XX                                                                           XX
XX   Generic info about the compilation and the method being compiled.      XX
XX   It is responsible for driving the other phases.                        XX
XX   It is also responsible for all the memory management.                  XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    Compiler* InlineeCompiler; // The Compiler instance for the inlinee

    InlineResult* compInlineResult; // The result of importing the inlinee method.

    bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
    bool compJmpOpUsed;            // Does the method do a JMP
    bool compLongUsed;             // Does the method use TYP_LONG
    bool compFloatingPointUsed;    // Does the method use TYP_FLOAT or TYP_DOUBLE
    bool compTailCallUsed;         // Does the method do a tailcall
    bool compTailPrefixSeen;       // Does the method IL have tail. prefix
    bool compLocallocSeen;         // Does the method IL have localloc opcode
    bool compLocallocUsed;         // Does the method use localloc.
    bool compLocallocOptimized;    // Does the method have an optimized localloc
    bool compQmarkUsed;            // Does the method use GT_QMARK/GT_COLON
    bool compQmarkRationalized;    // Is it allowed to use a GT_QMARK/GT_COLON node.
    bool compHasBackwardJump;      // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
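        // An illustrative summary (a sketch, not authoritative): the DEFAULT_MIN_OPTS_*
        // thresholds below are compared against method size metrics (IL code size,
        // instruction count, basic block count, local variable counts) when deciding
        // whether to fall back to MinOpts for pathologically large methods.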
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000

// Maximum number of locals before turning off inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512

        bool compMinOpts;
        bool compMinOptsIsSet;
#ifdef DEBUG
        mutable bool compMinOptsIsUsed;

        bool MinOpts() const
        {
            assert(compMinOptsIsSet);
            compMinOptsIsUsed = true;
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#else  // !DEBUG
        bool MinOpts() const
        {
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#endif // !DEBUG

        bool OptimizationDisabled() const
        {
            return MinOpts() || compDbgCode;
        }
        bool OptimizationEnabled() const
        {
            return !OptimizationDisabled();
        }

        void SetMinOpts(bool val)
        {
            assert(!compMinOptsIsUsed);
            assert(!compMinOptsIsSet || (compMinOpts == val));
            compMinOpts      = val;
            compMinOptsIsSet = true;
        }

        // true if the CLFLG_* for an optimization is set.
        bool OptEnabled(unsigned optFlag) const
        {
            return !!(compFlags & optFlag);
        }

#ifdef FEATURE_READYTORUN
        bool IsReadyToRun() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
        }
#else
        bool IsReadyToRun() const
        {
            return false;
        }
#endif

        // Check if the compilation is control-flow guard enabled.
        bool IsCFGEnabled() const
        {
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
            // On these platforms we assume the register that the target is
            // passed in is preserved by the validator and take care to get the
            // target from the register for the call (even in debug mode).
            static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
            if (JitConfig.JitForceControlFlowGuard())
                return true;

            return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
            // The remaining platforms are not supported and would require some
            // work to support.
            //
            // ARM32:
            //   The ARM32 validator does not preserve any volatile registers
            //   which means we have to take special care to allocate and use a
            //   callee-saved register (reloading the target from memory is a
            //   security issue).
            //
            // x86:
            //   On x86 some VSD calls disassemble the call site and expect an
            //   indirect call which is fundamentally incompatible with CFG.
            //   This would require a different way to pass this information
            //   through.
            //
            return false;
#endif
        }

#ifdef FEATURE_ON_STACK_REPLACEMENT
        bool IsOSR() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
        }
#else
        bool IsOSR() const
        {
            return false;
        }
#endif

        // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
        // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
        // the current logic for frame setup initializes and pushes
        // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
        // safely be pushed/popped while the thread is in a preemptive state).
        bool ShouldUsePInvokeHelpers()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
                   jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        // true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
        // prolog/epilog
        bool IsReversePInvoke()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        bool compScopeInfo; // Generate the LocalVar info ?
        bool compDbgCode;   // Generate debugger-friendly code?
        bool compDbgInfo;   // Gather debugging info?
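        // Generate Edit and Continue (EnC) compatible code?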
        bool compDbgEnC;

#ifdef PROFILING_SUPPORTED
        bool compNoPInvokeInlineCB;
#else
        static const bool compNoPInvokeInlineCB;
#endif

#ifdef DEBUG
        bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif

#if defined(DEBUG) && defined(TARGET_XARCH)
        bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)
        bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)

        bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen

#ifdef DEBUG
#if defined(TARGET_XARCH)
        bool compEnablePCRelAddr; // Whether an absolute addr can be encoded as a PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG

#ifdef UNIX_AMD64_ABI
        // This flag indicates whether there is a need to align the frame.
        // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
        // FastTailCall. These slots make the frame size non-zero, so alignment logic will be called.
        // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size
        // of 0. The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering
        // that there are calls and making sure the frame alignment logic is executed.
        bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI

        bool compProcedureSplitting; // Separate cold code from hot code

        bool genFPorder; // Preserve FP order (operations are non-commutative)
        bool genFPopt;   // Can we do frame-pointer-omission optimization?
        bool altJit;     // True if we are an altjit and are compiling this method

#ifdef OPT_CONFIG
        bool optRepeat; // Repeat optimizer phases k times
#endif

#ifdef DEBUG
        bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
        bool dspCode;                  // Display native code generated
        bool dspEHTable;               // Display the EH table reported to the VM
        bool dspDebugInfo;             // Display the Debug info reported to the VM
        bool dspInstrs;                // Display the IL instructions intermixed with the native code output
        bool dspLines;                 // Display source-code lines intermixed with native code output
        bool dmpHex;                   // Display raw bytes in hex of native code output
        bool varNames;                 // Display variables names in native code output
        bool disAsm;                   // Display native code as it is generated
        bool disAsmSpilled;            // Display native code when any register spilling occurs
        bool disasmWithGC;             // Display GC info interleaved with disassembly.
        bool disDiffable;              // Makes the Disassembly code 'diff-able'
        bool disAddr;                  // Display process address next to each instruction in disassembly code
        bool disAlignment;             // Display alignment boundaries in disassembly code
        bool disAsm2;                  // Display native code after it is generated using external disassembler
        bool dspOrder;                 // Display names of each of the methods that we ngen/jit
        bool dspUnwind;                // Display the unwind info output
        bool dspDiffable;     // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
        bool compLongAddress; // Force using large pseudo instructions for long address
                              // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
        bool dspGCtbls;       // Display the GC tables
#endif

        bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method

        // Default numbers used to perform loop alignment. All the numbers are chosen
        // based on experimenting with various benchmarks.
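        // For example (an illustrative reading of the defaults defined just below): with the
        // default 32-byte boundary and non-adaptive alignment, only loops of at most
        // 3 * 32 = 96 bytes are considered for alignment, and a loop's first block must have
        // a weight of at least 4 for the loop to be a candidate at all.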
        // Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4

        // By default a loop will be aligned at 32B address boundary to get better
        // performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20

        // For non-adaptive loop alignment, by default, only align a loop whose size is
        // at most 3 times the alignment block size. If the loop is bigger than that, it is most
        // likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3

#ifdef DEBUG
        // Loop alignment variables

        // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
        bool compJitAlignLoopForJcc;
#endif
        // For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
        unsigned short compJitAlignLoopMaxCodeSize;

        // Minimum weight needed for the first block of a loop to make it a candidate for alignment.
        unsigned short compJitAlignLoopMinBlockWeight;

        // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
        // be done. By default, 32B.
        unsigned short compJitAlignLoopBoundary;

        // Padding limit to align a loop.
        unsigned short compJitAlignPaddingLimit;

        // If set, perform adaptive loop alignment that limits number of padding based on loop size.
        bool compJitAlignLoopAdaptive;

        // If set, tries to hide alignment instructions behind unconditional jumps.
        bool compJitHideAlignBehindJmp;

#ifdef LATE_DISASM
        bool doLateDisasm; // Run the late disassembler
#endif                     // LATE_DISASM

#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
        static const bool dspGCtbls = true;
#endif

#ifdef PROFILING_SUPPORTED
        // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
        // This option helps make the JIT behave as if it is running under a profiler.
        bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED

#if FEATURE_TAILCALL_OPT
        // Whether opportunistic or implicit tail call optimization is enabled.
        bool compTailCallOpt;
        // Whether optimization of transforming a recursive tail call into a loop is enabled.
        bool compTailCallLoopOpt;
#endif

#if FEATURE_FASTTAILCALL
        // Whether fast tail calls are allowed.
        bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL

#if defined(TARGET_ARM64)
        // Decision about whether to save FP/LR registers with callee-saved registers (see
        // COMPlus_JitSaveFpLrWithCalleeSavedRegisters).
        int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)

#ifdef CONFIGURABLE_ARM_ABI
        bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
        static const bool compUseSoftFP = true;
#else  // !ARM_SOFTFP
        static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
    } opts;

    static bool s_pAltJitExcludeAssembliesListInitialized;
    static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;

#ifdef DEBUG
    static bool s_pJitDisasmIncludeAssembliesListInitialized;
    static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;

    static bool       s_pJitFunctionFileInitialized;
    static MethodSet* s_pJitMethodSet;
#endif // DEBUG

#ifdef DEBUG
// Silence the warning about a cast to a greater size. It is easier to silence than to construct
// code the compiler is happy with, and it is safe in this case.
#pragma warning(push)
#pragma warning(disable : 4312)
    template <typename T>
    T dspPtr(T p)
    {
        return (p == ZERO) ? ZERO : (opts.dspDiffable ?
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
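    // (An illustrative note: compInfoBlkSize and compInfoBlkAddr below track that block's
    // size and address while it is being filled in.)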
    size_t compInfoBlkSize;
    BYTE*  compInfoBlkAddr;

    EHblkDsc* compHndBBtab;           // array of EH data
    unsigned  compHndBBtabCount;      // element count of used elements in EH data array
    unsigned  compHndBBtabAllocCount; // element count of allocated elements in EH data array

#if defined(TARGET_X86)

    //-------------------------------------------------------------------------
    //  Tracking of region covered by the monitor in synchronized methods
    void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
    void* syncEndEmitCookie;   // the emitter cookie for first instruction after the call to MON_EXIT

#endif // TARGET_X86

    Phases      mostRecentlyActivePhase; // the most recently active phase
    PhaseChecks activePhaseChecks;       // the currently active phase checks

    //-------------------------------------------------------------------------
    //  The following keeps track of how many bytes of local frame space we've
    //  grabbed so far in the current function, and how many argument bytes we
    //  need to pop when we return.
    //

    unsigned compLclFrameSize; // secObject+lclBlk+locals+temps

    // Count of callee-saved regs we pushed in the prolog.
    // Does not include EBP for isFramePointerUsed() and double-aligned frames.
    // In case of Amd64 this doesn't include float regs saved on stack.
    unsigned compCalleeRegsPushed;

#if defined(TARGET_XARCH)
    // Mask of callee saved float regs on stack.
    regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef TARGET_AMD64
// Quirk for VS debug-launch scenario to work:
// Bytes of padding between save-reg area and locals.
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
    unsigned compVSQuirkStackPaddingNeeded;
#endif

    unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))

    unsigned compMapILargNum(unsigned ILargNum);      // map accounting for hidden args
    unsigned compMapILvarNum(unsigned ILvarNum);      // map accounting for hidden args
    unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args

#if defined(TARGET_ARM64)
    struct FrameInfo
    {
        // Frame type (1-5)
        int frameType;

        // Distance from established (method body) SP to base of callee save area
        int calleeSaveSpOffset;

        // Amount to subtract from SP before saving (prolog) OR
        // to add to SP after restoring (epilog) callee saves
        int calleeSaveSpDelta;

        // Distance from established SP to where caller's FP was saved
        int offsetSpToSavedFp;
    } compFrameInfo;
#endif

    //-------------------------------------------------------------------------

    static void compStartup();  // One-time initialization
    static void compShutdown(); // One-time finalization

    void compInit(ArenaAllocator*       pAlloc,
                  CORINFO_METHOD_HANDLE methodHnd,
                  COMP_HANDLE           compHnd,
                  CORINFO_METHOD_INFO*  methodInfo,
                  InlineInfo*           inlineInfo);
    void compDone();

    static void compDisplayStaticSizes(FILE* fout);

    //------------ Some utility functions --------------

    void* compGetHelperFtn(CorInfoHelpFunc ftnNum,         /* IN  */
                           void**          ppIndirection); /* OUT */

    // Several JIT/EE interface functions return a CorInfoType, and also return a
    // class handle as an out parameter if the type is a value class. Returns the
    // size of the type these describe.
    unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);

    // Returns true if the method being compiled has a return buffer.
    bool compHasRetBuffArg();

#ifdef DEBUG
    // Components used by the compiler may write unit test suites, and
    // have them run within this method. They will be run only once per process, and only
    // in debug. (Perhaps should be under the control of a COMPlus_ flag.)
    // These should fail by asserting.
    void compDoComponentUnitTestsOnce();
#endif // DEBUG

    int compCompile(CORINFO_MODULE_HANDLE classPtr,
                    void**                methodCodePtr,
                    uint32_t*             methodCodeSize,
                    JitFlags*             compileFlags);
    void compCompileFinish();
    int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
                          COMP_HANDLE           compHnd,
                          CORINFO_METHOD_INFO*  methodInfo,
                          void**                methodCodePtr,
                          uint32_t*             methodCodeSize,
                          JitFlags*             compileFlag);

    ArenaAllocator* compGetArenaAllocator();

    void generatePatchpointInfo();

#if MEASURE_MEM_ALLOC
    static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif                         // MEASURE_MEM_ALLOC

#if LOOP_HOIST_STATS
    unsigned m_loopsConsidered;
    bool     m_curLoopHasHoistedExpression;
    unsigned m_loopsWithHoistedExpressions;
    unsigned m_totalHoistedExpressions;

    void AddLoopHoistStats();
    void PrintPerMethodLoopHoistStats();

    static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
    static unsigned      s_loopsConsidered;
    static unsigned      s_loopsWithHoistedExpressions;
    static unsigned      s_totalHoistedExpressions;

    static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS

#if TRACK_ENREG_STATS
    class EnregisterStats
    {
    private:
        unsigned m_totalNumberOfVars;
        unsigned m_totalNumberOfStructVars;
        unsigned m_totalNumberOfEnregVars;
        unsigned m_totalNumberOfStructEnregVars;

        unsigned m_addrExposed;
        unsigned m_VMNeedsStackAddr;
        unsigned m_localField;
        unsigned m_blockOp;
        unsigned m_dontEnregStructs;
        unsigned m_notRegSizeStruct;
        unsigned m_structArg;
        unsigned m_lclAddrNode;
        unsigned m_castTakesAddr;
        unsigned m_storeBlkSrc;
        unsigned m_oneAsgRetyping;
        unsigned m_swizzleArg;
        unsigned m_blockOpRet;
        unsigned m_returnSpCheck;
        unsigned m_simdUserForcesDep;
        unsigned m_liveInOutHndlr;
        unsigned m_depField;
        unsigned m_noRegVars;
        unsigned m_minOptsGC;
#ifdef JIT32_GCENCODER
        unsigned m_PinningRef;
#endif // JIT32_GCENCODER
#if !defined(TARGET_64BIT)
        unsigned m_longParamField;
#endif // !TARGET_64BIT
        unsigned m_parentExposed;
        unsigned m_tooConservative;
        unsigned m_escapeAddress;
        unsigned m_osrExposed;
        unsigned m_stressLclFld;
        unsigned m_copyFldByFld;
        unsigned m_dispatchRetBuf;
        unsigned m_wideIndir;

    public:
        void RecordLocal(const LclVarDsc* varDsc);
        void Dump(FILE* fout) const;
    };

    static EnregisterStats s_enregisterStats;
#endif // TRACK_ENREG_STATS

    bool compIsForImportOnly();
    bool compIsForInlining() const;
    bool compDonotInline();

#ifdef DEBUG
    // Get the default fill char value; we randomize this value when JitStress is enabled.
    static unsigned char compGetJitDefaultFill(Compiler* comp);

    const char* compLocalVarName(unsigned varNum, unsigned offs);
    VarName compVarName(regNumber reg, bool isFloatReg = false);
    const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
    const char* compRegNameForSize(regNumber reg, size_t size);
    const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
    void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
    void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG

    //-------------------------------------------------------------------------

    struct VarScopeListNode
    {
        VarScopeDsc*      data;
        VarScopeListNode* next;
        static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
        {
            VarScopeListNode* node = new (alloc) VarScopeListNode;
            node->data             = value;
            node->next             = nullptr;
            return node;
        }
    };

    struct VarScopeMapInfo
    {
        VarScopeListNode* head;
        VarScopeListNode* tail;
        static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
        {
            VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
            info->head            = node;
            info->tail            = node;
            return info;
        }
    };

    // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
    static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;

    typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;

    // Map to keep variables' scope indexed by varNum, containing its scope dscs at the index.
    VarNumToScopeDscMap* compVarScopeMap;

    VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);

    VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);

    VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);

    void compInitVarScopeMap();

    VarScopeDsc** compEnterScopeList; // List has the offsets where variables
                                      // enter scope, sorted by instr offset
    unsigned compNextEnterScope;

    VarScopeDsc** compExitScopeList; // List has the offsets where variables
                                     // go out of scope, sorted by instr offset
    unsigned compNextExitScope;

    void compInitScopeLists();

    void compResetScopeLists();

    VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);

    VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);

    void compProcessScopesUntil(unsigned   offset,
                                VARSET_TP* inScope,
                                void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
                                void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));

#ifdef DEBUG
    void compDispScopeLists();
#endif // DEBUG

    bool compIsProfilerHookNeeded();

    //-------------------------------------------------------------------------
    /*               Statistical Data Gathering                               */

    void compJitStats(); // call this function and enable
                         // various ifdef's below for statistical data

#if CALL_ARG_STATS
    void        compCallArgStats();
    static void compDispCallArgStats(FILE* fout);
#endif

    //-------------------------------------------------------------------------

protected:
#ifdef DEBUG
    bool skipMethod();
#endif

    ArenaAllocator* compArenaAllocator;

public:
    void compFunctionTraceStart();
    void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);

protected:
    size_t compMaxUncheckedOffsetForNullObject;

    void compInitOptions(JitFlags* compileFlags);

    void compSetProcessor();
    void compInitDebuggingInfo();
    void compSetOptimizationLevel();
#ifdef TARGET_ARMARCH
    bool compRsvdRegCheck(FrameLayoutState curState);
#endif
    void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags);

    // Clear annotations produced during optimizations;
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to suport attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // in account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered intited only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function. 
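// For example (illustrative IL in a derived type's .ctor, mirroring the rule above): before // 'call instance void Base::.ctor()' executes, 'ldarg.0; ldfld ...' on the uninitialized this ptr // is allowed, but 'ldarg.0; callvirt ...' is not.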
bool verTrackObjCtorInitState; void verInitBBEntryState(BasicBlock* block, EntryState* currentState); // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state. void verSetThisInit(BasicBlock* block, ThisInitState tis); void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); // Merges the current verification state into the entry state of "block"; returns false if that merge fails, // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block". bool verMergeEntryStates(BasicBlock* block, bool* changed); void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo bool verIsSDArray(const typeInfo& ti); typeInfo verGetArrayElemType(const typeInfo& ti); typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); bool verIsByRefLike(const typeInfo& ti); bool verIsSafeToReturnByRef(const typeInfo& ti); // generic type variables range over types that satisfy IsBoxable bool verIsBoxable(const typeInfo& ti); void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); bool verCheckTailCallConstraint(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call // on a type parameter? bool speculative // If true, won't throw if verification fails. Instead it will // return false to the caller. // If false, it will throw. ); bool verIsBoxedValueType(const typeInfo& ti); void verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, // is this a "readonly." call? const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)); bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef); typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType); typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType); void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis = false); void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode); void verVerifyThisPtrInitialised(); bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target); #ifdef DEBUG // One-line log function. Default level is 0. Increasing it gives you // more log information. // levels are currently unused: #define JITDUMP(level,...)
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num; if not valid, set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). LSRA could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in a register. Therefore, conservatively, all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that live-out-of-exception-handler is something we may not be // able to check here, since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling that need a GS cookie check, we might have // to take the conservative approach. // // Possible solution to address case (b) // - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs a GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs the cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replace param uses with shadows static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overridden by setting the COMPlus_JITInlineSize environment variable.
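// Example with a hypothetical value: setting COMPlus_JITInlineSize=60 lowers the cap to 60 IL bytes.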
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the the field sequence using the ZeroOffsetFieldMap described above. // // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR. // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such case is handled same as the default case. 
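// Illustrative case of the zero-offset situation described above: for the address of 's.firstField' // where 'firstField' lies at offset zero, the tree is just 'ADDR(LCL_VAR s)' with no GT_ADD constant // to carry the annotation, so the field sequence for 'firstField' is recorded against the address // node via this map.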
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
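// (TypedReference exposes exactly two fields, fetched below by index: the data byref at index 0 and // the type at index 1 -- see GetRefanyDataField and GetRefanyTypeField.)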
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
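// A minimal DomTreeVisitor sketch in the style of the GenTreeVisitor example above (the visitor // name is hypothetical): // // class BlockCountingVisitor final : public DomTreeVisitor<BlockCountingVisitor> // { // public: // unsigned m_count; // // BlockCountingVisitor(Compiler* compiler, DomTreeNode* domTree) // : DomTreeVisitor<BlockCountingVisitor>(compiler, domTree), m_count(0) // { // } // // void PreOrderVisit(BasicBlock* block) // { // m_count++; // } // }; // // This visitor would then be used like so: // // BlockCountingVisitor visitor(compiler, domTree); // visitor.WalkTree();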
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/jit/ee_il_dll.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX ee_jit.cpp XX XX XX XX The functionality needed for the JIT DLL. Includes the DLL entry point XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" #include "corexcep.h" #if !defined(HOST_UNIX) #include <io.h> // For _dup, _setmode #include <fcntl.h> // For _O_TEXT #include <errno.h> // For EINVAL #endif #ifndef DLLEXPORT #define DLLEXPORT #endif // !DLLEXPORT /*****************************************************************************/ FILE* jitstdout = nullptr; ICorJitHost* g_jitHost = nullptr; static CILJit* ILJitter = nullptr; // The one and only JITTER I return bool g_jitInitialized = false; /*****************************************************************************/ extern "C" DLLEXPORT void jitStartup(ICorJitHost* jitHost) { if (g_jitInitialized) { if (jitHost != g_jitHost) { // We normally don't expect jitStartup() to be invoked more than once. // (We check whether it has been called once due to an abundance of caution.) // However, during SuperPMI playback of MCH file, we need to JIT many different methods. // Each one carries its own environment configuration state. // So, we need the JIT to reload the JitConfig state for each change in the environment state of the // replayed compilations. // We do this by calling jitStartup with a different ICorJitHost, // and have the JIT re-initialize its JitConfig state when this happens. JitConfig.destroy(g_jitHost); JitConfig.initialize(jitHost); g_jitHost = jitHost; } return; } #ifdef HOST_UNIX int err = PAL_InitializeDLL(); if (err != 0) { return; } #endif g_jitHost = jitHost; assert(!JitConfig.isInitialized()); JitConfig.initialize(jitHost); #ifdef DEBUG const WCHAR* jitStdOutFile = JitConfig.JitStdOutFile(); if (jitStdOutFile != nullptr) { jitstdout = _wfopen(jitStdOutFile, W("a")); assert(jitstdout != nullptr); } #endif // DEBUG #if !defined(HOST_UNIX) if (jitstdout == nullptr) { int stdoutFd = _fileno(procstdout()); // Check fileno error output(s) -1 may overlap with errno result // but is included for completness. // We want to detect the case where the initial handle is null // or bogus and avoid making further calls. if ((stdoutFd != -1) && (stdoutFd != -2) && (errno != EINVAL)) { int jitstdoutFd = _dup(_fileno(procstdout())); // Check the error status returned by dup. if (jitstdoutFd != -1) { _setmode(jitstdoutFd, _O_TEXT); jitstdout = _fdopen(jitstdoutFd, "w"); assert(jitstdout != nullptr); // Prevent the FILE* from buffering its output in order to avoid calls to // `fflush()` throughout the code. setvbuf(jitstdout, nullptr, _IONBF, 0); } } } #endif // !HOST_UNIX // If jitstdout is still null, fallback to whatever procstdout() was // initially set to. 
if (jitstdout == nullptr) { jitstdout = procstdout(); } #ifdef FEATURE_TRACELOGGING JitTelemetry::NotifyDllProcessAttach(); #endif Compiler::compStartup(); g_jitInitialized = true; } void jitShutdown(bool processIsTerminating) { if (!g_jitInitialized) { return; } Compiler::compShutdown(); if (jitstdout != procstdout()) { // When the process is terminating, the fclose call is unnecessary and is also prone to // crashing since the UCRT itself often frees the backing memory earlier on in the // termination sequence. if (!processIsTerminating) { fclose(jitstdout); } } #ifdef FEATURE_TRACELOGGING JitTelemetry::NotifyDllProcessDetach(); #endif g_jitInitialized = false; } /*****************************************************************************/ struct CILJitSingletonAllocator { int x; }; const CILJitSingletonAllocator CILJitSingleton = {0}; void* __cdecl operator new(size_t, const CILJitSingletonAllocator&) { static char CILJitBuff[sizeof(CILJit)]; return CILJitBuff; } DLLEXPORT ICorJitCompiler* getJit() { if (!g_jitInitialized) { return nullptr; } if (ILJitter == nullptr) { ILJitter = new (CILJitSingleton) CILJit(); } return (ILJitter); } /*****************************************************************************/ // Information kept in thread-local storage. This is used in the noway_assert exceptional path. // If you are using it more broadly in retail code, you would need to understand the // performance implications of accessing TLS. thread_local void* gJitTls = nullptr; static void* GetJitTls() { return gJitTls; } void SetJitTls(void* value) { gJitTls = value; } #if defined(DEBUG) JitTls::JitTls(ICorJitInfo* jitInfo) : m_compiler(nullptr), m_logEnv(jitInfo) { m_next = reinterpret_cast<JitTls*>(GetJitTls()); SetJitTls(this); } JitTls::~JitTls() { SetJitTls(m_next); } LogEnv* JitTls::GetLogEnv() { return &reinterpret_cast<JitTls*>(GetJitTls())->m_logEnv; } Compiler* JitTls::GetCompiler() { return reinterpret_cast<JitTls*>(GetJitTls())->m_compiler; } void JitTls::SetCompiler(Compiler* compiler) { reinterpret_cast<JitTls*>(GetJitTls())->m_compiler = compiler; } #else // !defined(DEBUG) JitTls::JitTls(ICorJitInfo* jitInfo) { } JitTls::~JitTls() { } Compiler* JitTls::GetCompiler() { return reinterpret_cast<Compiler*>(GetJitTls()); } void JitTls::SetCompiler(Compiler* compiler) { SetJitTls(compiler); } #endif // !defined(DEBUG) //**************************************************************************** // The main JIT function for the 32 bit JIT. See code:ICorJitCompiler#EEToJitInterface for more on the EE-JIT // interface. Things really don't get going inside the JIT until the code:Compiler::compCompile#Phases // method. Usually that is where you want to go. 
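// A rough call-flow sketch based on the code below: compileMethod fetches the full CORJIT_FLAGS via // getJitFlags, sets up thread-local state with JitTls, and hands off to jitNativeCode, which drives // Compiler::compCompile through its phases.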
CorJitResult CILJit::compileMethod(ICorJitInfo* compHnd, CORINFO_METHOD_INFO* methodInfo, unsigned flags, uint8_t** entryAddress, uint32_t* nativeSizeOfCode) { JitFlags jitFlags; assert(flags == CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS); CORJIT_FLAGS corJitFlags; DWORD jitFlagsSize = compHnd->getJitFlags(&corJitFlags, sizeof(corJitFlags)); assert(jitFlagsSize == sizeof(corJitFlags)); jitFlags.SetFromFlags(corJitFlags); int result; void* methodCodePtr = nullptr; CORINFO_METHOD_HANDLE methodHandle = methodInfo->ftn; JitTls jitTls(compHnd); // Initialize any necessary thread-local state assert(methodInfo->ILCode); result = jitNativeCode(methodHandle, methodInfo->scope, compHnd, methodInfo, &methodCodePtr, nativeSizeOfCode, &jitFlags, nullptr); if (result == CORJIT_OK) { *entryAddress = (BYTE*)methodCodePtr; } return CorJitResult(result); } void CILJit::ProcessShutdownWork(ICorStaticInfo* statInfo) { jitShutdown(false); Compiler::ProcessShutdownWork(statInfo); } /***************************************************************************** * Verify the JIT/EE interface identifier. */ void CILJit::getVersionIdentifier(GUID* versionIdentifier) { assert(versionIdentifier != nullptr); memcpy(versionIdentifier, &JITEEVersionIdentifier, sizeof(GUID)); } #ifdef TARGET_OS_RUNTIMEDETERMINED bool TargetOS::OSSettingConfigured = false; bool TargetOS::IsWindows = false; bool TargetOS::IsUnix = false; bool TargetOS::IsMacOS = false; #endif /***************************************************************************** * Set the OS that this JIT should be generating code for. The contract with the VM * is that this must be called before compileMethod is called. */ void CILJit::setTargetOS(CORINFO_OS os) { #ifdef TARGET_OS_RUNTIMEDETERMINED TargetOS::IsMacOS = os == CORINFO_MACOS; TargetOS::IsUnix = (os == CORINFO_UNIX) || (os == CORINFO_MACOS); TargetOS::IsWindows = os == CORINFO_WINNT; TargetOS::OSSettingConfigured = true; #endif } /***************************************************************************** * Determine the maximum length of SIMD vector supported by this JIT. */ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) { JitFlags jitFlags; jitFlags.SetFromFlags(cpuCompileFlags); #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT) && jitFlags.IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD) && jitFlags.GetInstructionSetFlags().HasInstructionSet(InstructionSet_AVX2)) { if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 32\n"); } return 32; } #endif // defined(TARGET_XARCH) if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 16\n"); } return 16; #else // !FEATURE_SIMD if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 0\n"); } return 0; #endif // !FEATURE_SIMD } //------------------------------------------------------------------------ // eeGetArgSize: Returns the number of bytes required for the given type argument // including padding after the actual value. // // Arguments: // list - the arg list handle pointing to the argument // sig - the signature for the arg's method // // Return value: // the number of stack slots in stack arguments for the call. // // Notes: // - On most platforms arguments are passed with TARGET_POINTER_SIZE alignment, // so all types take an integer number of TARGET_POINTER_SIZE slots. 
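// For example, on a 64-bit target a 12-byte struct passed by value occupies // roundUp(12, 8) = 16 bytes, i.e. two pointer-sized slots.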
// It is different for arm64 apple that packs some types without alignment and padding. // If the argument is passed by reference then the method returns REF size. // unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig) { #if defined(TARGET_AMD64) // Everything fits into a single 'slot' size // to accommodate irregular sized structs, they are passed byref CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI CORINFO_CLASS_HANDLE argClass; CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass)); var_types argType = JITtype2varType(argTypeJit); if (varTypeIsStruct(argType)) { unsigned structSize = info.compCompHnd->getClassSize(argClass); return roundUp(structSize, TARGET_POINTER_SIZE); } #endif // UNIX_AMD64_ABI return TARGET_POINTER_SIZE; #else // !TARGET_AMD64 CORINFO_CLASS_HANDLE argClass; CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass)); var_types argType = JITtype2varType(argTypeJit); unsigned argSize; var_types hfaType = TYP_UNDEF; bool isHfa = false; if (varTypeIsStruct(argType)) { hfaType = GetHfaType(argClass); isHfa = (hfaType != TYP_UNDEF); unsigned structSize = info.compCompHnd->getClassSize(argClass); // make certain the EE passes us back the right thing for refanys assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * TARGET_POINTER_SIZE); // For each target that supports passing struct args in multiple registers // apply the target specific rules for them here: CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_MULTIREG_ARGS #if defined(TARGET_ARM64) // Any structs that are larger than MAX_PASS_MULTIREG_BYTES are always passed by reference if (structSize > MAX_PASS_MULTIREG_BYTES) { // This struct is passed by reference using a single 'slot' return TARGET_POINTER_SIZE; } else { // Is the struct larger than 16 bytes if (structSize > (2 * TARGET_POINTER_SIZE)) { if (TargetOS::IsWindows && info.compIsVarArgs) { // Arm64 Varargs ABI requires passing in general purpose // registers. Force the decision of whether this is an HFA // to false to correctly pass as if it was not an HFA. isHfa = false; } if (!isHfa) { // This struct is passed by reference using a single 'slot' return TARGET_POINTER_SIZE; } } } #elif !defined(TARGET_ARM) NYI("unknown target"); #endif // defined(TARGET_XXX) #endif // FEATURE_MULTIREG_ARGS // Otherwise we will pass this struct by value in multiple registers/stack bytes. argSize = structSize; } else { argSize = genTypeSize(argType); } const unsigned argAlignment = eeGetArgAlignment(argType, (hfaType == TYP_FLOAT)); const unsigned argSizeWithPadding = roundUp(argSize, argAlignment); return argSizeWithPadding; #endif } //------------------------------------------------------------------------ // eeGetArgAlignment: Return arg passing alignment for the given type. // // Arguments: // type - the argument type // isFloatHfa - is it an HFA<float> type // // Return value: // the required alignment in bytes. // // Notes: // It currently doesn't return smaller than required alignment for arm32 (4 bytes for double and int64) // but it does not lead to issues because its alignment requirements are satisfied in other code parts. // TODO: fix this function and delete the other code that is handling this. 
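// Illustrative results per the implementation below: under the macOS arm64 ABI a float HFA aligns // to sizeof(float) = 4, any other struct to TARGET_POINTER_SIZE, and a primitive to its own size; // on all other targets the result is TARGET_POINTER_SIZE.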
// // static unsigned Compiler::eeGetArgAlignment(var_types type, bool isFloatHfa) { if (compMacOsArm64Abi()) { if (isFloatHfa) { assert(varTypeIsStruct(type)); return sizeof(float); } if (varTypeIsStruct(type)) { return TARGET_POINTER_SIZE; } const unsigned argSize = genTypeSize(type); assert((0 < argSize) && (argSize <= TARGET_POINTER_SIZE)); return argSize; } else { return TARGET_POINTER_SIZE; } } /*****************************************************************************/ GenTree* Compiler::eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig) { void *cookie, *pCookie; cookie = info.compCompHnd->GetCookieForPInvokeCalliSig(szMetaSig, &pCookie); assert((cookie == nullptr) != (pCookie == nullptr)); return gtNewIconEmbHndNode(cookie, pCookie, GTF_ICON_PINVKI_HDL, szMetaSig); } //------------------------------------------------------------------------ // eeGetArrayDataOffset: Gets the offset of a SDArray's first element // // Return Value: // The offset to the first array element. // // Notes: // See the comments at the definition of CORINFO_Array for a description of how arrays are laid out in memory. // // static unsigned Compiler::eeGetArrayDataOffset() { return OFFSETOF__CORINFO_Array__data; } //------------------------------------------------------------------------ // eeGetMDArrayDataOffset: Gets the offset of a MDArray's first element // // Arguments: // rank - The array rank // // Return Value: // The offset to the first array element. // // Assumptions: // The rank should be greater than 0. // // static unsigned Compiler::eeGetMDArrayDataOffset(unsigned rank) { assert(rank > 0); // Note that below we're specifically using genTypeSize(TYP_INT) because array // indices are not native int. return eeGetArrayDataOffset() + 2 * genTypeSize(TYP_INT) * rank; } //------------------------------------------------------------------------ // eeGetMDArrayLengthOffset: Returns the offset from the Array object to the // size for the given dimension. // // Arguments: // rank - the rank of the array // dimension - the dimension for which the lower bound offset will be returned. // // Return Value: // The offset. // // static unsigned Compiler::eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension) { // Note that we don't actually need the `rank` value for this calculation, but we pass it anyway, // to be consistent with other MD array functions. assert(rank > 0); assert(dimension < rank); // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets. return eeGetArrayDataOffset() + genTypeSize(TYP_INT) * dimension; } //------------------------------------------------------------------------ // eeGetMDArrayLowerBoundOffset: Returns the offset from the Array object to the // lower bound for the given dimension. // // Arguments: // rank - the rank of the array // dimension - the dimension for which the lower bound offset will be returned. // // Return Value: // The offset. // // static unsigned Compiler::eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension) { assert(rank > 0); assert(dimension < rank); // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets. 
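// Worked example for the two offset helpers above (illustrative values; assumes genTypeSize(TYP_INT) == 4 and OFFSETOF__CORINFO_Array__data == 16 on a 64-bit target): for a rank-3 array, the dimension-1 length lives at 16 + 4 * 1 = 20, and its lower bound at 16 + 4 * (1 + 3) = 32.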
return eeGetArrayDataOffset() + genTypeSize(TYP_INT) * (dimension + rank); } /*****************************************************************************/ void Compiler::eeGetStmtOffsets() { ULONG32 offsetsCount; uint32_t* offsets; ICorDebugInfo::BoundaryTypes offsetsImplicit; if (compIsForInlining()) { // We do not get explicit boundaries for inlinees, only implicit ones. offsetsImplicit = impInlineRoot()->info.compStmtOffsetsImplicit; offsetsCount = 0; offsets = nullptr; } else { info.compCompHnd->getBoundaries(info.compMethodHnd, &offsetsCount, &offsets, &offsetsImplicit); } /* Set the implicit boundaries */ info.compStmtOffsetsImplicit = (ICorDebugInfo::BoundaryTypes)offsetsImplicit; /* Process the explicit boundaries */ info.compStmtOffsetsCount = 0; if (offsetsCount == 0) { return; } info.compStmtOffsets = new (this, CMK_DebugInfo) IL_OFFSET[offsetsCount]; for (unsigned i = 0; i < offsetsCount; i++) { if (offsets[i] > info.compILCodeSize) { continue; } info.compStmtOffsets[info.compStmtOffsetsCount] = offsets[i]; info.compStmtOffsetsCount++; } info.compCompHnd->freeArray(offsets); } /***************************************************************************** * * Debugging support - Local var info */ void Compiler::eeSetLVcount(unsigned count) { assert(opts.compScopeInfo); JITDUMP("VarLocInfo count is %d\n", count); eeVarsCount = count; if (eeVarsCount) { eeVars = (VarResultInfo*)info.compCompHnd->allocateArray(eeVarsCount * sizeof(eeVars[0])); } else { eeVars = nullptr; } } void Compiler::eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& varLoc) { // ICorDebugInfo::VarLoc and CodeGenInterface::siVarLoc have to overlap // This is checked in siInit() assert(opts.compScopeInfo); assert(eeVarsCount > 0); assert(which < eeVarsCount); if (eeVars != nullptr) { eeVars[which].startOffset = startOffs; eeVars[which].endOffset = startOffs + length; eeVars[which].varNumber = varNum; eeVars[which].loc = varLoc; } } void Compiler::eeSetLVdone() { // necessary but not sufficient condition that the 2 struct definitions overlap assert(sizeof(eeVars[0]) == sizeof(ICorDebugInfo::NativeVarInfo)); assert(opts.compScopeInfo); #ifdef DEBUG if (verbose || opts.dspDebugInfo) { eeDispVars(info.compMethodHnd, eeVarsCount, (ICorDebugInfo::NativeVarInfo*)eeVars); } #endif // DEBUG info.compCompHnd->setVars(info.compMethodHnd, eeVarsCount, (ICorDebugInfo::NativeVarInfo*)eeVars); eeVars = nullptr; // We give up ownership after setVars() } void Compiler::eeGetVars() { ICorDebugInfo::ILVarInfo* varInfoTable; ULONG32 varInfoCount; bool extendOthers; info.compCompHnd->getVars(info.compMethodHnd, &varInfoCount, &varInfoTable, &extendOthers); #ifdef DEBUG if (verbose) { printf("getVars() returned cVars = %d, extendOthers = %s\n", varInfoCount, extendOthers ? "true" : "false"); } #endif // Over allocate in case extendOthers is set. 
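// (Illustrative sizing: if getVars() reports 2 IL scopes and extendOthers is true for a method with 5 locals, varInfoCountExtra below becomes 2 + 5 = 7, and the trailing assert checks we never write past those 7 VarScopeDsc entries.)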
SIZE_T varInfoCountExtra = varInfoCount; if (extendOthers) { varInfoCountExtra += info.compLocalsCount; } if (varInfoCountExtra == 0) { return; } info.compVarScopes = new (this, CMK_DebugInfo) VarScopeDsc[varInfoCountExtra]; VarScopeDsc* localVarPtr = info.compVarScopes; ICorDebugInfo::ILVarInfo* v = varInfoTable; for (unsigned i = 0; i < varInfoCount; i++, v++) { #ifdef DEBUG if (verbose) { printf("var:%d start:%d end:%d\n", v->varNumber, v->startOffset, v->endOffset); } #endif if (v->startOffset >= v->endOffset) { continue; } assert(v->startOffset <= info.compILCodeSize); assert(v->endOffset <= info.compILCodeSize); localVarPtr->vsdLifeBeg = v->startOffset; localVarPtr->vsdLifeEnd = v->endOffset; localVarPtr->vsdLVnum = i; localVarPtr->vsdVarNum = compMapILvarNum(v->varNumber); #ifdef DEBUG localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum); #endif localVarPtr++; info.compVarScopesCount++; } /* If extendOthers is set, then assume the scope of unreported vars is the entire method. Note that this will cause fgExtendDbgLifetimes() to zero-initialize all of them. This will be expensive if it's used for too many variables. */ if (extendOthers) { // Allocate a bit-array for all the variables and initialize to false bool* varInfoProvided = getAllocator(CMK_Unknown).allocate<bool>(info.compLocalsCount); unsigned i; for (i = 0; i < info.compLocalsCount; i++) { varInfoProvided[i] = false; } // Find which vars have absolutely no varInfo provided for (i = 0; i < info.compVarScopesCount; i++) { varInfoProvided[info.compVarScopes[i].vsdVarNum] = true; } // Create entries for the variables with no varInfo for (unsigned varNum = 0; varNum < info.compLocalsCount; varNum++) { if (varInfoProvided[varNum]) { continue; } // Create a varInfo with scope over the entire method localVarPtr->vsdLifeBeg = 0; localVarPtr->vsdLifeEnd = info.compILCodeSize; localVarPtr->vsdVarNum = varNum; localVarPtr->vsdLVnum = info.compVarScopesCount; #ifdef DEBUG localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum); #endif localVarPtr++; info.compVarScopesCount++; } } assert(localVarPtr <= info.compVarScopes + varInfoCountExtra); if (varInfoCount != 0) { info.compCompHnd->freeArray(varInfoTable); } #ifdef DEBUG if (verbose) { compDispLocalVars(); } #endif // DEBUG } #ifdef DEBUG void Compiler::eeDispVar(ICorDebugInfo::NativeVarInfo* var) { const char* name = nullptr; if (var->varNumber == (DWORD)ICorDebugInfo::VARARGS_HND_ILNUM) { name = "varargsHandle"; } else if (var->varNumber == (DWORD)ICorDebugInfo::RETBUF_ILNUM) { name = "retBuff"; } else if (var->varNumber == (DWORD)ICorDebugInfo::TYPECTXT_ILNUM) { name = "typeCtx"; } printf("%3d(%10s) : From %08Xh to %08Xh, in ", var->varNumber, (VarNameToStr(name) == nullptr) ?
"UNKNOWN" : VarNameToStr(name), var->startOffset, var->endOffset); switch ((CodeGenInterface::siVarLocType)var->loc.vlType) { case CodeGenInterface::VLT_REG: case CodeGenInterface::VLT_REG_BYREF: case CodeGenInterface::VLT_REG_FP: printf("%s", getRegName(var->loc.vlReg.vlrReg)); if (var->loc.vlType == (ICorDebugInfo::VarLocType)CodeGenInterface::VLT_REG_BYREF) { printf(" byref"); } break; case CodeGenInterface::VLT_STK: case CodeGenInterface::VLT_STK_BYREF: if ((int)var->loc.vlStk.vlsBaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP) { printf("%s[%d] (1 slot)", getRegName(var->loc.vlStk.vlsBaseReg), var->loc.vlStk.vlsOffset); } else { printf(STR_SPBASE "'[%d] (1 slot)", var->loc.vlStk.vlsOffset); } if (var->loc.vlType == (ICorDebugInfo::VarLocType)CodeGenInterface::VLT_REG_BYREF) { printf(" byref"); } break; case CodeGenInterface::VLT_REG_REG: printf("%s-%s", getRegName(var->loc.vlRegReg.vlrrReg1), getRegName(var->loc.vlRegReg.vlrrReg2)); break; #ifndef TARGET_AMD64 case CodeGenInterface::VLT_REG_STK: if ((int)var->loc.vlRegStk.vlrsStk.vlrssBaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP) { printf("%s-%s[%d]", getRegName(var->loc.vlRegStk.vlrsReg), getRegName(var->loc.vlRegStk.vlrsStk.vlrssBaseReg), var->loc.vlRegStk.vlrsStk.vlrssOffset); } else { printf("%s-" STR_SPBASE "'[%d]", getRegName(var->loc.vlRegStk.vlrsReg), var->loc.vlRegStk.vlrsStk.vlrssOffset); } break; case CodeGenInterface::VLT_STK_REG: unreached(); // unexpected case CodeGenInterface::VLT_STK2: if ((int)var->loc.vlStk2.vls2BaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP) { printf("%s[%d] (2 slots)", getRegName(var->loc.vlStk2.vls2BaseReg), var->loc.vlStk2.vls2Offset); } else { printf(STR_SPBASE "'[%d] (2 slots)", var->loc.vlStk2.vls2Offset); } break; case CodeGenInterface::VLT_FPSTK: printf("ST(L-%d)", var->loc.vlFPstk.vlfReg); break; case CodeGenInterface::VLT_FIXED_VA: printf("fxd_va[%d]", var->loc.vlFixedVarArg.vlfvOffset); break; #endif // !TARGET_AMD64 default: unreached(); // unexpected } printf("\n"); } // Same parameters as ICorStaticInfo::setVars(). 
void Compiler::eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars) { // Estimate number of unique vars with debug info // ALLVARSET_TP uniqueVars(AllVarSetOps::MakeEmpty(this)); for (unsigned i = 0; i < cVars; i++) { // ignore "special vars" and out of bounds vars if ((((int)vars[i].varNumber) >= 0) && (vars[i].varNumber < lclMAX_ALLSET_TRACKED)) { AllVarSetOps::AddElemD(this, uniqueVars, vars[i].varNumber); } } printf("; Variable debug info: %d live ranges, %d vars for method %s\n", cVars, AllVarSetOps::Count(this, uniqueVars), info.compFullName); for (unsigned i = 0; i < cVars; i++) { eeDispVar(&vars[i]); } } #endif // DEBUG /***************************************************************************** * * Debugging support - Line number info */ void Compiler::eeSetLIcount(unsigned count) { assert(opts.compDbgInfo); eeBoundariesCount = count; if (eeBoundariesCount) { eeBoundaries = (ICorDebugInfo::OffsetMapping*)info.compCompHnd->allocateArray(eeBoundariesCount * sizeof(eeBoundaries[0])); } else { eeBoundaries = nullptr; } } void Compiler::eeSetLIinfo(unsigned which, UNATIVE_OFFSET nativeOffset, IPmappingDscKind kind, const ILLocation& loc) { assert(opts.compDbgInfo); assert(eeBoundariesCount > 0 && eeBoundaries != nullptr); assert(which < eeBoundariesCount); eeBoundaries[which].nativeOffset = nativeOffset; eeBoundaries[which].source = (ICorDebugInfo::SourceTypes)0; switch (kind) { int source; case IPmappingDscKind::Normal: eeBoundaries[which].ilOffset = loc.GetOffset(); source = loc.IsStackEmpty() ? ICorDebugInfo::STACK_EMPTY : 0; source |= loc.IsCall() ? ICorDebugInfo::CALL_INSTRUCTION : 0; eeBoundaries[which].source = (ICorDebugInfo::SourceTypes)source; break; case IPmappingDscKind::Prolog: eeBoundaries[which].ilOffset = ICorDebugInfo::PROLOG; eeBoundaries[which].source = ICorDebugInfo::STACK_EMPTY; break; case IPmappingDscKind::Epilog: eeBoundaries[which].ilOffset = ICorDebugInfo::EPILOG; eeBoundaries[which].source = ICorDebugInfo::STACK_EMPTY; break; case IPmappingDscKind::NoMapping: eeBoundaries[which].ilOffset = ICorDebugInfo::NO_MAPPING; eeBoundaries[which].source = ICorDebugInfo::STACK_EMPTY; break; default: unreached(); } } void Compiler::eeSetLIdone() { assert(opts.compDbgInfo); #if defined(DEBUG) if (verbose || opts.dspDebugInfo) { eeDispLineInfos(); } #endif // DEBUG // necessary but not sufficient condition that the 2 struct definitions overlap assert(sizeof(eeBoundaries[0]) == sizeof(ICorDebugInfo::OffsetMapping)); info.compCompHnd->setBoundaries(info.compMethodHnd, eeBoundariesCount, (ICorDebugInfo::OffsetMapping*)eeBoundaries); eeBoundaries = nullptr; // we give up ownership after setBoundaries(); } #if defined(DEBUG) void Compiler::eeDispILOffs(IL_OFFSET offs) { printf("0x%04X", offs); } /* static */ void Compiler::eeDispSourceMappingOffs(uint32_t offs) { const char* specialOffs[] = {"EPILOG", "PROLOG", "NO_MAP"}; switch ((int)offs) // Need the cast since offs is unsigned and the case statements are comparing to signed. 
{ case ICorDebugInfo::EPILOG: case ICorDebugInfo::PROLOG: case ICorDebugInfo::NO_MAPPING: assert(DWORD(ICorDebugInfo::EPILOG) + 1 == (unsigned)ICorDebugInfo::PROLOG); assert(DWORD(ICorDebugInfo::EPILOG) + 2 == (unsigned)ICorDebugInfo::NO_MAPPING); int specialOffsNum; specialOffsNum = offs - DWORD(ICorDebugInfo::EPILOG); printf("%s", specialOffs[specialOffsNum]); break; default: eeDispILOffs(offs); break; } } /* static */ void Compiler::eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line) { printf("IL offs "); eeDispSourceMappingOffs(line->ilOffset); printf(" : 0x%08X", line->nativeOffset); if (line->source != 0) { // It seems like it should probably never be zero since ICorDebugInfo::SOURCE_TYPE_INVALID is zero. // However, the JIT has always generated this and printed "stack non-empty". printf(" ( "); if ((line->source & ICorDebugInfo::STACK_EMPTY) != 0) { printf("STACK_EMPTY "); } if ((line->source & ICorDebugInfo::CALL_INSTRUCTION) != 0) { printf("CALL_INSTRUCTION "); } if ((line->source & ICorDebugInfo::CALL_SITE) != 0) { printf("CALL_SITE "); } printf(")"); } printf("\n"); // We don't expect to see any other bits. assert((line->source & ~(ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::CALL_INSTRUCTION)) == 0); } void Compiler::eeDispLineInfos() { printf("IP mapping count : %d\n", eeBoundariesCount); // this might be zero for (unsigned i = 0; i < eeBoundariesCount; i++) { eeDispLineInfo(&eeBoundaries[i]); } printf("\n"); } #endif // DEBUG /***************************************************************************** * * ICorJitInfo wrapper functions * * In many cases here, we don't tell the VM about various unwind or EH information if * we're an altjit for an unexpected architecture. If it's not a same architecture JIT * (e.g., host AMD64, target ARM64), then VM will get confused anyway. */ void Compiler::eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize) { #ifdef DEBUG if (verbose) { printf("reserveUnwindInfo(isFunclet=%s, isColdCode=%s, unwindSize=0x%x)\n", isFunclet ? "true" : "false", isColdCode ? 
"true" : "false", unwindSize); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->reserveUnwindInfo(isFunclet, isColdCode, unwindSize); } } void Compiler::eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, CorJitFuncKind funcKind) { #ifdef DEBUG if (verbose) { printf("allocUnwindInfo(pHotCode=0x%p, pColdCode=0x%p, startOffset=0x%x, endOffset=0x%x, unwindSize=0x%x, " "pUnwindBlock=0x%p, funKind=%d", dspPtr(pHotCode), dspPtr(pColdCode), startOffset, endOffset, unwindSize, dspPtr(pUnwindBlock), funcKind); switch (funcKind) { case CORJIT_FUNC_ROOT: printf(" (main function)"); break; case CORJIT_FUNC_HANDLER: printf(" (handler)"); break; case CORJIT_FUNC_FILTER: printf(" (filter)"); break; default: printf(" (ILLEGAL)"); break; } printf(")\n"); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->allocUnwindInfo(pHotCode, pColdCode, startOffset, endOffset, unwindSize, pUnwindBlock, funcKind); } } void Compiler::eeSetEHcount(unsigned cEH) { #ifdef DEBUG if (verbose) { printf("setEHcount(cEH=%u)\n", cEH); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->setEHcount(cEH); } } void Compiler::eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause) { #ifdef DEBUG if (opts.dspEHTable) { dispOutgoingEHClause(EHnumber, *clause); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->setEHinfo(EHnumber, clause); } } WORD Compiler::eeGetRelocTypeHint(void* target) { if (info.compMatchedVM) { return info.compCompHnd->getRelocTypeHint(target); } else { // No hints return (WORD)-1; } } CORINFO_FIELD_HANDLE Compiler::eeFindJitDataOffs(unsigned dataOffs) { // Data offsets are marked by the fact that the low two bits are 0b01 0x1 assert(dataOffs < 0x40000000); return (CORINFO_FIELD_HANDLE)(size_t)((dataOffs << iaut_SHIFT) | iaut_DATA_OFFSET); } bool Compiler::eeIsJitDataOffs(CORINFO_FIELD_HANDLE field) { // if 'field' is a jit data offset it has to fit into a 32-bit unsigned int unsigned value = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field)); if (((CORINFO_FIELD_HANDLE)(size_t)value) != field) { return false; // some bits in the upper 32 bits were set, not a jit data offset } // Data offsets are marked by the fact that the low two bits are 0b01 return (value & iaut_MASK) == iaut_DATA_OFFSET; } int Compiler::eeGetJitDataOffs(CORINFO_FIELD_HANDLE field) { // Data offsets are marked by the fact that the low two bits are 0b01 0x1 if (eeIsJitDataOffs(field)) { unsigned dataOffs = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field)); assert(((CORINFO_FIELD_HANDLE)(size_t)dataOffs) == field); assert(dataOffs < 0x40000000); // Shift away the low two bits return (static_cast<int>(reinterpret_cast<intptr_t>(field))) >> iaut_SHIFT; } else { return -1; } } /***************************************************************************** * * ICorStaticInfo wrapper functions */ #if defined(UNIX_AMD64_ABI) #ifdef DEBUG void Compiler::dumpSystemVClassificationType(SystemVClassificationType ct) { switch (ct) { case SystemVClassificationTypeUnknown: printf("UNKNOWN"); break; case SystemVClassificationTypeStruct: printf("Struct"); break; case SystemVClassificationTypeNoClass: printf("NoClass"); break; case SystemVClassificationTypeMemory: printf("Memory"); break; case SystemVClassificationTypeInteger: printf("Integer"); break; case SystemVClassificationTypeIntegerReference: printf("IntegerReference"); break; case SystemVClassificationTypeIntegerByRef: printf("IntegerByReference"); break; case 
SystemVClassificationTypeSSE: printf("SSE"); break; default: printf("ILLEGAL"); break; } } #endif // DEBUG void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr) { bool ok = info.compCompHnd->getSystemVAmd64PassStructInRegisterDescriptor(structHnd, structPassInRegDescPtr); noway_assert(ok); #ifdef DEBUG if (verbose) { printf("**** getSystemVAmd64PassStructInRegisterDescriptor(0x%x (%s), ...) =>\n", dspPtr(structHnd), eeGetClassName(structHnd)); printf(" passedInRegisters = %s\n", dspBool(structPassInRegDescPtr->passedInRegisters)); if (structPassInRegDescPtr->passedInRegisters) { printf(" eightByteCount = %d\n", structPassInRegDescPtr->eightByteCount); for (unsigned int i = 0; i < structPassInRegDescPtr->eightByteCount; i++) { printf(" eightByte #%d -- classification: ", i); dumpSystemVClassificationType(structPassInRegDescPtr->eightByteClassifications[i]); printf(", byteSize: %d, byteOffset: %d\n", structPassInRegDescPtr->eightByteSizes[i], structPassInRegDescPtr->eightByteOffsets[i]); } } } #endif // DEBUG } #endif // UNIX_AMD64_ABI bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken) { return info.compCompHnd->tryResolveToken(resolvedToken); } bool Compiler::eeRunWithErrorTrapImp(void (*function)(void*), void* param) { return info.compCompHnd->runWithErrorTrap(function, param); } bool Compiler::eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param) { return info.compCompHnd->runWithSPMIErrorTrap(function, param); } /***************************************************************************** * * Utility functions */ #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(FEATURE_TRACELOGGING) /*****************************************************************************/ // static helper names - constant array const char* jitHlpFuncTable[CORINFO_HELP_COUNT] = { #define JITHELPER(code, pfnHelper, sig) #code, #define DYNAMICJITHELPER(code, pfnHelper, sig) #code, #include "jithelpers.h" }; /***************************************************************************** * * Filter wrapper to handle exception filtering. * On Unix, compilers don't support SEH. */ struct FilterSuperPMIExceptionsParam_ee_il { Compiler* pThis; Compiler::Info* pJitInfo; CORINFO_FIELD_HANDLE field; CORINFO_METHOD_HANDLE method; CORINFO_CLASS_HANDLE clazz; const char** classNamePtr; const char* fieldOrMethodOrClassNamePtr; EXCEPTION_POINTERS exceptionPointers; }; const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE method, const char** classNamePtr) { if (eeGetHelperNum(method) != CORINFO_HELP_UNDEF) { if (classNamePtr != nullptr) { *classNamePtr = "HELPER"; } CorInfoHelpFunc ftnNum = eeGetHelperNum(method); const char* name = info.compCompHnd->getHelperName(ftnNum); // If it's something unknown from a RET (retail) VM, or from SuperPMI, then use our own helper name table.
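// (jitHlpFuncTable above stringizes each CorInfoHelpFunc enumerator, so, illustratively, the entry for CORINFO_HELP_USER_BREAKPOINT is the string "CORINFO_HELP_USER_BREAKPOINT".)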
if ((strcmp(name, "AnyJITHelper") == 0) || (strcmp(name, "Yickish helper name") == 0)) { if ((unsigned)ftnNum < CORINFO_HELP_COUNT) { name = jitHlpFuncTable[ftnNum]; } } return name; } if (eeIsNativeMethod(method)) { if (classNamePtr != nullptr) { *classNamePtr = "NATIVE"; } method = eeGetMethodHandleForNative(method); } FilterSuperPMIExceptionsParam_ee_il param; param.pThis = this; param.pJitInfo = &info; param.method = method; param.classNamePtr = classNamePtr; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_ee_il>( [](FilterSuperPMIExceptionsParam_ee_il* pParam) { pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getMethodName(pParam->method, pParam->classNamePtr); }, &param); if (!success) { if (param.classNamePtr != nullptr) { *(param.classNamePtr) = "hackishClassName"; } param.fieldOrMethodOrClassNamePtr = "hackishMethodName"; } return param.fieldOrMethodOrClassNamePtr; } const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE field, const char** classNamePtr) { FilterSuperPMIExceptionsParam_ee_il param; param.pThis = this; param.pJitInfo = &info; param.field = field; param.classNamePtr = classNamePtr; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_ee_il>( [](FilterSuperPMIExceptionsParam_ee_il* pParam) { pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getFieldName(pParam->field, pParam->classNamePtr); }, &param); if (!success) { param.fieldOrMethodOrClassNamePtr = "hackishFieldName"; } return param.fieldOrMethodOrClassNamePtr; } const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd) { FilterSuperPMIExceptionsParam_ee_il param; param.pThis = this; param.pJitInfo = &info; param.clazz = clsHnd; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_ee_il>( [](FilterSuperPMIExceptionsParam_ee_il* pParam) { pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getClassName(pParam->clazz); }, &param); if (!success) { param.fieldOrMethodOrClassNamePtr = "hackishClassName"; } return param.fieldOrMethodOrClassNamePtr; } #endif // DEBUG || FEATURE_JIT_METHOD_PERF #ifdef DEBUG const WCHAR* Compiler::eeGetCPString(size_t strHandle) { #ifdef HOST_UNIX return nullptr; #else char buff[512 + sizeof(CORINFO_String)]; // make this bulletproof, so it works even if we are wrong. if (ReadProcessMemory(GetCurrentProcess(), (void*)strHandle, buff, 4, nullptr) == 0) { return (nullptr); } CORINFO_String* asString = *((CORINFO_String**)strHandle); if (ReadProcessMemory(GetCurrentProcess(), asString, buff, sizeof(buff), nullptr) == 0) { return (nullptr); } if (asString->stringLen >= 255 || asString->chars[asString->stringLen] != 0) { return nullptr; } return (WCHAR*)(asString->chars); #endif // HOST_UNIX } #endif // DEBUG
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX ee_jit.cpp XX XX XX XX The functionality needed for the JIT DLL. Includes the DLL entry point XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" #include "corexcep.h" #if !defined(HOST_UNIX) #include <io.h> // For _dup, _setmode #include <fcntl.h> // For _O_TEXT #include <errno.h> // For EINVAL #endif #ifndef DLLEXPORT #define DLLEXPORT #endif // !DLLEXPORT /*****************************************************************************/ FILE* jitstdout = nullptr; ICorJitHost* g_jitHost = nullptr; static CILJit* ILJitter = nullptr; // The one and only JITTER I return bool g_jitInitialized = false; /*****************************************************************************/ extern "C" DLLEXPORT void jitStartup(ICorJitHost* jitHost) { if (g_jitInitialized) { if (jitHost != g_jitHost) { // We normally don't expect jitStartup() to be invoked more than once. // (We check whether it has been called once due to an abundance of caution.) // However, during SuperPMI playback of an MCH file, we need to JIT many different methods. // Each one carries its own environment configuration state. // So, we need the JIT to reload the JitConfig state for each change in the environment state of the // replayed compilations. // We do this by calling jitStartup with a different ICorJitHost, // and have the JIT re-initialize its JitConfig state when this happens. JitConfig.destroy(g_jitHost); JitConfig.initialize(jitHost); g_jitHost = jitHost; } return; } #ifdef HOST_UNIX int err = PAL_InitializeDLL(); if (err != 0) { return; } #endif g_jitHost = jitHost; assert(!JitConfig.isInitialized()); JitConfig.initialize(jitHost); #ifdef DEBUG const WCHAR* jitStdOutFile = JitConfig.JitStdOutFile(); if (jitStdOutFile != nullptr) { jitstdout = _wfopen(jitStdOutFile, W("a")); assert(jitstdout != nullptr); } #endif // DEBUG #if !defined(HOST_UNIX) if (jitstdout == nullptr) { int stdoutFd = _fileno(procstdout()); // Check the fileno error output(s); -1 may overlap with the errno result, // but is included for completeness. // We want to detect the case where the initial handle is null // or bogus and avoid making further calls. if ((stdoutFd != -1) && (stdoutFd != -2) && (errno != EINVAL)) { int jitstdoutFd = _dup(_fileno(procstdout())); // Check the error status returned by dup. if (jitstdoutFd != -1) { _setmode(jitstdoutFd, _O_TEXT); jitstdout = _fdopen(jitstdoutFd, "w"); assert(jitstdout != nullptr); // Prevent the FILE* from buffering its output in order to avoid calls to // `fflush()` throughout the code. setvbuf(jitstdout, nullptr, _IONBF, 0); } } } #endif // !HOST_UNIX // If jitstdout is still null, fall back to whatever procstdout() was // initially set to.
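// (One consequence of the _dup above, worth noting: the fclose(jitstdout) in jitShutdown closes only the duplicated descriptor, so the process's original stdout stream is never closed out from under the host.)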
if (jitstdout == nullptr) { jitstdout = procstdout(); } #ifdef FEATURE_TRACELOGGING JitTelemetry::NotifyDllProcessAttach(); #endif Compiler::compStartup(); g_jitInitialized = true; } void jitShutdown(bool processIsTerminating) { if (!g_jitInitialized) { return; } Compiler::compShutdown(); if (jitstdout != procstdout()) { // When the process is terminating, the fclose call is unnecessary and is also prone to // crashing since the UCRT itself often frees the backing memory earlier on in the // termination sequence. if (!processIsTerminating) { fclose(jitstdout); } } #ifdef FEATURE_TRACELOGGING JitTelemetry::NotifyDllProcessDetach(); #endif g_jitInitialized = false; } /*****************************************************************************/ struct CILJitSingletonAllocator { int x; }; const CILJitSingletonAllocator CILJitSingleton = {0}; void* __cdecl operator new(size_t, const CILJitSingletonAllocator&) { static char CILJitBuff[sizeof(CILJit)]; return CILJitBuff; } DLLEXPORT ICorJitCompiler* getJit() { if (!g_jitInitialized) { return nullptr; } if (ILJitter == nullptr) { ILJitter = new (CILJitSingleton) CILJit(); } return (ILJitter); } /*****************************************************************************/ // Information kept in thread-local storage. This is used in the noway_assert exceptional path. // If you are using it more broadly in retail code, you would need to understand the // performance implications of accessing TLS. thread_local void* gJitTls = nullptr; static void* GetJitTls() { return gJitTls; } void SetJitTls(void* value) { gJitTls = value; } #if defined(DEBUG) JitTls::JitTls(ICorJitInfo* jitInfo) : m_compiler(nullptr), m_logEnv(jitInfo) { m_next = reinterpret_cast<JitTls*>(GetJitTls()); SetJitTls(this); } JitTls::~JitTls() { SetJitTls(m_next); } LogEnv* JitTls::GetLogEnv() { return &reinterpret_cast<JitTls*>(GetJitTls())->m_logEnv; } Compiler* JitTls::GetCompiler() { return reinterpret_cast<JitTls*>(GetJitTls())->m_compiler; } void JitTls::SetCompiler(Compiler* compiler) { reinterpret_cast<JitTls*>(GetJitTls())->m_compiler = compiler; } #else // !defined(DEBUG) JitTls::JitTls(ICorJitInfo* jitInfo) { } JitTls::~JitTls() { } Compiler* JitTls::GetCompiler() { return reinterpret_cast<Compiler*>(GetJitTls()); } void JitTls::SetCompiler(Compiler* compiler) { SetJitTls(compiler); } #endif // !defined(DEBUG) //**************************************************************************** // The main JIT function for the 32 bit JIT. See code:ICorJitCompiler#EEToJitInterface for more on the EE-JIT // interface. Things really don't get going inside the JIT until the code:Compiler::compCompile#Phases // method. Usually that is where you want to go. 
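// A minimal sketch of how a host drives this entry point (illustrative only; assumes the host supplies ICorJitHost/ICorJitInfo implementations and a populated CORINFO_METHOD_INFO, here called myHost, myJitInfo, and methodInfo):
#if 0
jitStartup(myHost);              // one-time initialization of JitConfig and Compiler state
ICorJitCompiler* jit = getJit(); // returns the CILJit singleton
uint8_t* entry    = nullptr;
uint32_t codeSize = 0;
// The flags argument must be CORJIT_FLAG_CALL_GETJITFLAGS; the real flag set
// is then fetched via compHnd->getJitFlags(), as asserted in compileMethod below.
CorJitResult res = jit->compileMethod(myJitInfo, &methodInfo, CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS, &entry, &codeSize);
#endif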
CorJitResult CILJit::compileMethod(ICorJitInfo* compHnd, CORINFO_METHOD_INFO* methodInfo, unsigned flags, uint8_t** entryAddress, uint32_t* nativeSizeOfCode) { JitFlags jitFlags; assert(flags == CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS); CORJIT_FLAGS corJitFlags; DWORD jitFlagsSize = compHnd->getJitFlags(&corJitFlags, sizeof(corJitFlags)); assert(jitFlagsSize == sizeof(corJitFlags)); jitFlags.SetFromFlags(corJitFlags); int result; void* methodCodePtr = nullptr; CORINFO_METHOD_HANDLE methodHandle = methodInfo->ftn; JitTls jitTls(compHnd); // Initialize any necessary thread-local state assert(methodInfo->ILCode); result = jitNativeCode(methodHandle, methodInfo->scope, compHnd, methodInfo, &methodCodePtr, nativeSizeOfCode, &jitFlags, nullptr); if (result == CORJIT_OK) { *entryAddress = (BYTE*)methodCodePtr; } return CorJitResult(result); } void CILJit::ProcessShutdownWork(ICorStaticInfo* statInfo) { jitShutdown(false); Compiler::ProcessShutdownWork(statInfo); } /***************************************************************************** * Verify the JIT/EE interface identifier. */ void CILJit::getVersionIdentifier(GUID* versionIdentifier) { assert(versionIdentifier != nullptr); memcpy(versionIdentifier, &JITEEVersionIdentifier, sizeof(GUID)); } #ifdef TARGET_OS_RUNTIMEDETERMINED bool TargetOS::OSSettingConfigured = false; bool TargetOS::IsWindows = false; bool TargetOS::IsUnix = false; bool TargetOS::IsMacOS = false; #endif /***************************************************************************** * Set the OS that this JIT should be generating code for. The contract with the VM * is that this must be called before compileMethod is called. */ void CILJit::setTargetOS(CORINFO_OS os) { #ifdef TARGET_OS_RUNTIMEDETERMINED TargetOS::IsMacOS = os == CORINFO_MACOS; TargetOS::IsUnix = (os == CORINFO_UNIX) || (os == CORINFO_MACOS); TargetOS::IsWindows = os == CORINFO_WINNT; TargetOS::OSSettingConfigured = true; #endif } /***************************************************************************** * Determine the maximum length of SIMD vector supported by this JIT. */ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) { JitFlags jitFlags; jitFlags.SetFromFlags(cpuCompileFlags); #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT) && jitFlags.IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD) && jitFlags.GetInstructionSetFlags().HasInstructionSet(InstructionSet_AVX2)) { if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 32\n"); } return 32; } #endif // defined(TARGET_XARCH) if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 16\n"); } return 16; #else // !FEATURE_SIMD if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 0\n"); } return 0; #endif // !FEATURE_SIMD } //------------------------------------------------------------------------ // eeGetArgSize: Returns the number of bytes required for the given type argument // including padding after the actual value. // // Arguments: // list - the arg list handle pointing to the argument // sig - the signature for the arg's method // // Return value: // the number of bytes the argument occupies on the stack, including padding. // // Notes: // - On most platforms arguments are passed with TARGET_POINTER_SIZE alignment, // so all types take an integer number of TARGET_POINTER_SIZE slots.
// It is different for Apple arm64, which packs some types without alignment or padding. // If the argument is passed by reference then the method returns REF size. // unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig) { #if defined(TARGET_AMD64) // Everything fits into a single 'slot' size // to accommodate irregularly sized structs, they are passed byref CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI CORINFO_CLASS_HANDLE argClass; CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass)); var_types argType = JITtype2varType(argTypeJit); if (varTypeIsStruct(argType)) { unsigned structSize = info.compCompHnd->getClassSize(argClass); return roundUp(structSize, TARGET_POINTER_SIZE); } #endif // UNIX_AMD64_ABI return TARGET_POINTER_SIZE; #else // !TARGET_AMD64 CORINFO_CLASS_HANDLE argClass; CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass)); var_types argType = JITtype2varType(argTypeJit); unsigned argSize; var_types hfaType = TYP_UNDEF; bool isHfa = false; if (varTypeIsStruct(argType)) { hfaType = GetHfaType(argClass); isHfa = (hfaType != TYP_UNDEF); unsigned structSize = info.compCompHnd->getClassSize(argClass); // make certain the EE passes us back the right thing for refanys assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * TARGET_POINTER_SIZE); // For each target that supports passing struct args in multiple registers // apply the target specific rules for them here: CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_MULTIREG_ARGS #if defined(TARGET_ARM64) // Any structs that are larger than MAX_PASS_MULTIREG_BYTES are always passed by reference if (structSize > MAX_PASS_MULTIREG_BYTES) { // This struct is passed by reference using a single 'slot' return TARGET_POINTER_SIZE; } else { // Is the struct larger than 16 bytes if (structSize > (2 * TARGET_POINTER_SIZE)) { if (TargetOS::IsWindows && info.compIsVarArgs) { // Arm64 Varargs ABI requires passing in general purpose // registers. Force the decision of whether this is an HFA // to false to correctly pass as if it was not an HFA. isHfa = false; } if (!isHfa) { // This struct is passed by reference using a single 'slot' return TARGET_POINTER_SIZE; } } } #elif !defined(TARGET_ARM) NYI("unknown target"); #endif // defined(TARGET_XXX) #endif // FEATURE_MULTIREG_ARGS // Otherwise we will pass this struct by value in multiple registers/stack bytes. argSize = structSize; } else { argSize = genTypeSize(argType); } const unsigned argSizeAlignment = eeGetArgSizeAlignment(argType, (hfaType == TYP_FLOAT)); const unsigned alignedArgSize = roundUp(argSize, argSizeAlignment); return alignedArgSize; #endif } //------------------------------------------------------------------------ // eeGetArgSizeAlignment: Return alignment for an argument size. // // Arguments: // type - the argument type // isFloatHfa - is it an HFA<float> type // // Return value: // the required argument size alignment in bytes. // // Notes: // Usually values passed on the stack are aligned to a stack slot (i.e. pointer size), except // on the macOS ARM ABI, which allows packing multiple args into a single stack slot. // // The arg size alignment can be different from the normal alignment. One // example is on arm32 where a struct containing a double and float can // explicitly have size 12 but with alignment 8, in which case the size is // aligned to 4 (the stack slot size) while frame layout must still handle // aligning the argument to 8.
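// Worked example (illustrative, arm32): struct { double d; float f; } has size 12 and alignment 8; eeGetArgSizeAlignment returns 4 (the stack slot size), so eeGetArgSize yields roundUp(12, 4) == 12, while frame layout independently 8-aligns the argument. Conversely, on the macOS arm64 ABI a TYP_SHORT argument gets a size alignment of just 2, which is what lets several small arguments share one stack slot.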
// // static unsigned Compiler::eeGetArgSizeAlignment(var_types type, bool isFloatHfa) { if (compMacOsArm64Abi()) { if (isFloatHfa) { assert(varTypeIsStruct(type)); return sizeof(float); } if (varTypeIsStruct(type)) { return TARGET_POINTER_SIZE; } const unsigned argSize = genTypeSize(type); assert((0 < argSize) && (argSize <= TARGET_POINTER_SIZE)); return argSize; } else { return TARGET_POINTER_SIZE; } } /*****************************************************************************/ GenTree* Compiler::eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig) { void *cookie, *pCookie; cookie = info.compCompHnd->GetCookieForPInvokeCalliSig(szMetaSig, &pCookie); assert((cookie == nullptr) != (pCookie == nullptr)); return gtNewIconEmbHndNode(cookie, pCookie, GTF_ICON_PINVKI_HDL, szMetaSig); } //------------------------------------------------------------------------ // eeGetArrayDataOffset: Gets the offset of a SDArray's first element // // Return Value: // The offset to the first array element. // // Notes: // See the comments at the definition of CORINFO_Array for a description of how arrays are laid out in memory. // // static unsigned Compiler::eeGetArrayDataOffset() { return OFFSETOF__CORINFO_Array__data; } //------------------------------------------------------------------------ // eeGetMDArrayDataOffset: Gets the offset of a MDArray's first element // // Arguments: // rank - The array rank // // Return Value: // The offset to the first array element. // // Assumptions: // The rank should be greater than 0. // // static unsigned Compiler::eeGetMDArrayDataOffset(unsigned rank) { assert(rank > 0); // Note that below we're specifically using genTypeSize(TYP_INT) because array // indices are not native int. return eeGetArrayDataOffset() + 2 * genTypeSize(TYP_INT) * rank; } //------------------------------------------------------------------------ // eeGetMDArrayLengthOffset: Returns the offset from the Array object to the // size for the given dimension. // // Arguments: // rank - the rank of the array // dimension - the dimension for which the lower bound offset will be returned. // // Return Value: // The offset. // // static unsigned Compiler::eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension) { // Note that we don't actually need the `rank` value for this calculation, but we pass it anyway, // to be consistent with other MD array functions. assert(rank > 0); assert(dimension < rank); // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets. return eeGetArrayDataOffset() + genTypeSize(TYP_INT) * dimension; } //------------------------------------------------------------------------ // eeGetMDArrayLowerBoundOffset: Returns the offset from the Array object to the // lower bound for the given dimension. // // Arguments: // rank - the rank of the array // dimension - the dimension for which the lower bound offset will be returned. // // Return Value: // The offset. // // static unsigned Compiler::eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension) { assert(rank > 0); assert(dimension < rank); // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets. 
return eeGetArrayDataOffset() + genTypeSize(TYP_INT) * (dimension + rank); } /*****************************************************************************/ void Compiler::eeGetStmtOffsets() { ULONG32 offsetsCount; uint32_t* offsets; ICorDebugInfo::BoundaryTypes offsetsImplicit; if (compIsForInlining()) { // We do not get explicit boundaries for inlinees, only implicit ones. offsetsImplicit = impInlineRoot()->info.compStmtOffsetsImplicit; offsetsCount = 0; offsets = nullptr; } else { info.compCompHnd->getBoundaries(info.compMethodHnd, &offsetsCount, &offsets, &offsetsImplicit); } /* Set the implicit boundaries */ info.compStmtOffsetsImplicit = (ICorDebugInfo::BoundaryTypes)offsetsImplicit; /* Process the explicit boundaries */ info.compStmtOffsetsCount = 0; if (offsetsCount == 0) { return; } info.compStmtOffsets = new (this, CMK_DebugInfo) IL_OFFSET[offsetsCount]; for (unsigned i = 0; i < offsetsCount; i++) { if (offsets[i] > info.compILCodeSize) { continue; } info.compStmtOffsets[info.compStmtOffsetsCount] = offsets[i]; info.compStmtOffsetsCount++; } info.compCompHnd->freeArray(offsets); } /***************************************************************************** * * Debugging support - Local var info */ void Compiler::eeSetLVcount(unsigned count) { assert(opts.compScopeInfo); JITDUMP("VarLocInfo count is %d\n", count); eeVarsCount = count; if (eeVarsCount) { eeVars = (VarResultInfo*)info.compCompHnd->allocateArray(eeVarsCount * sizeof(eeVars[0])); } else { eeVars = nullptr; } } void Compiler::eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& varLoc) { // ICorDebugInfo::VarLoc and CodeGenInterface::siVarLoc have to overlap // This is checked in siInit() assert(opts.compScopeInfo); assert(eeVarsCount > 0); assert(which < eeVarsCount); if (eeVars != nullptr) { eeVars[which].startOffset = startOffs; eeVars[which].endOffset = startOffs + length; eeVars[which].varNumber = varNum; eeVars[which].loc = varLoc; } } void Compiler::eeSetLVdone() { // necessary but not sufficient condition that the 2 struct definitions overlap assert(sizeof(eeVars[0]) == sizeof(ICorDebugInfo::NativeVarInfo)); assert(opts.compScopeInfo); #ifdef DEBUG if (verbose || opts.dspDebugInfo) { eeDispVars(info.compMethodHnd, eeVarsCount, (ICorDebugInfo::NativeVarInfo*)eeVars); } #endif // DEBUG info.compCompHnd->setVars(info.compMethodHnd, eeVarsCount, (ICorDebugInfo::NativeVarInfo*)eeVars); eeVars = nullptr; // We give up ownership after setVars() } void Compiler::eeGetVars() { ICorDebugInfo::ILVarInfo* varInfoTable; ULONG32 varInfoCount; bool extendOthers; info.compCompHnd->getVars(info.compMethodHnd, &varInfoCount, &varInfoTable, &extendOthers); #ifdef DEBUG if (verbose) { printf("getVars() returned cVars = %d, extendOthers = %s\n", varInfoCount, extendOthers ? "true" : "false"); } #endif // Over allocate in case extendOthers is set. 
SIZE_T varInfoCountExtra = varInfoCount; if (extendOthers) { varInfoCountExtra += info.compLocalsCount; } if (varInfoCountExtra == 0) { return; } info.compVarScopes = new (this, CMK_DebugInfo) VarScopeDsc[varInfoCountExtra]; VarScopeDsc* localVarPtr = info.compVarScopes; ICorDebugInfo::ILVarInfo* v = varInfoTable; for (unsigned i = 0; i < varInfoCount; i++, v++) { #ifdef DEBUG if (verbose) { printf("var:%d start:%d end:%d\n", v->varNumber, v->startOffset, v->endOffset); } #endif if (v->startOffset >= v->endOffset) { continue; } assert(v->startOffset <= info.compILCodeSize); assert(v->endOffset <= info.compILCodeSize); localVarPtr->vsdLifeBeg = v->startOffset; localVarPtr->vsdLifeEnd = v->endOffset; localVarPtr->vsdLVnum = i; localVarPtr->vsdVarNum = compMapILvarNum(v->varNumber); #ifdef DEBUG localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum); #endif localVarPtr++; info.compVarScopesCount++; } /* If extendOthers is set, then assume the scope of unreported vars is the entire method. Note that this will cause fgExtendDbgLifetimes() to zero-initialize all of them. This will be expensive if it's used for too many variables. */ if (extendOthers) { // Allocate a bit-array for all the variables and initialize to false bool* varInfoProvided = getAllocator(CMK_Unknown).allocate<bool>(info.compLocalsCount); unsigned i; for (i = 0; i < info.compLocalsCount; i++) { varInfoProvided[i] = false; } // Find which vars have absolutely no varInfo provided for (i = 0; i < info.compVarScopesCount; i++) { varInfoProvided[info.compVarScopes[i].vsdVarNum] = true; } // Create entries for the variables with no varInfo for (unsigned varNum = 0; varNum < info.compLocalsCount; varNum++) { if (varInfoProvided[varNum]) { continue; } // Create a varInfo with scope over the entire method localVarPtr->vsdLifeBeg = 0; localVarPtr->vsdLifeEnd = info.compILCodeSize; localVarPtr->vsdVarNum = varNum; localVarPtr->vsdLVnum = info.compVarScopesCount; #ifdef DEBUG localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum); #endif localVarPtr++; info.compVarScopesCount++; } } assert(localVarPtr <= info.compVarScopes + varInfoCountExtra); if (varInfoCount != 0) { info.compCompHnd->freeArray(varInfoTable); } #ifdef DEBUG if (verbose) { compDispLocalVars(); } #endif // DEBUG } #ifdef DEBUG void Compiler::eeDispVar(ICorDebugInfo::NativeVarInfo* var) { const char* name = nullptr; if (var->varNumber == (DWORD)ICorDebugInfo::VARARGS_HND_ILNUM) { name = "varargsHandle"; } else if (var->varNumber == (DWORD)ICorDebugInfo::RETBUF_ILNUM) { name = "retBuff"; } else if (var->varNumber == (DWORD)ICorDebugInfo::TYPECTXT_ILNUM) { name = "typeCtx"; } printf("%3d(%10s) : From %08Xh to %08Xh, in ", var->varNumber, (VarNameToStr(name) == nullptr) ?
"UNKNOWN" : VarNameToStr(name), var->startOffset, var->endOffset); switch ((CodeGenInterface::siVarLocType)var->loc.vlType) { case CodeGenInterface::VLT_REG: case CodeGenInterface::VLT_REG_BYREF: case CodeGenInterface::VLT_REG_FP: printf("%s", getRegName(var->loc.vlReg.vlrReg)); if (var->loc.vlType == (ICorDebugInfo::VarLocType)CodeGenInterface::VLT_REG_BYREF) { printf(" byref"); } break; case CodeGenInterface::VLT_STK: case CodeGenInterface::VLT_STK_BYREF: if ((int)var->loc.vlStk.vlsBaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP) { printf("%s[%d] (1 slot)", getRegName(var->loc.vlStk.vlsBaseReg), var->loc.vlStk.vlsOffset); } else { printf(STR_SPBASE "'[%d] (1 slot)", var->loc.vlStk.vlsOffset); } if (var->loc.vlType == (ICorDebugInfo::VarLocType)CodeGenInterface::VLT_REG_BYREF) { printf(" byref"); } break; case CodeGenInterface::VLT_REG_REG: printf("%s-%s", getRegName(var->loc.vlRegReg.vlrrReg1), getRegName(var->loc.vlRegReg.vlrrReg2)); break; #ifndef TARGET_AMD64 case CodeGenInterface::VLT_REG_STK: if ((int)var->loc.vlRegStk.vlrsStk.vlrssBaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP) { printf("%s-%s[%d]", getRegName(var->loc.vlRegStk.vlrsReg), getRegName(var->loc.vlRegStk.vlrsStk.vlrssBaseReg), var->loc.vlRegStk.vlrsStk.vlrssOffset); } else { printf("%s-" STR_SPBASE "'[%d]", getRegName(var->loc.vlRegStk.vlrsReg), var->loc.vlRegStk.vlrsStk.vlrssOffset); } break; case CodeGenInterface::VLT_STK_REG: unreached(); // unexpected case CodeGenInterface::VLT_STK2: if ((int)var->loc.vlStk2.vls2BaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP) { printf("%s[%d] (2 slots)", getRegName(var->loc.vlStk2.vls2BaseReg), var->loc.vlStk2.vls2Offset); } else { printf(STR_SPBASE "'[%d] (2 slots)", var->loc.vlStk2.vls2Offset); } break; case CodeGenInterface::VLT_FPSTK: printf("ST(L-%d)", var->loc.vlFPstk.vlfReg); break; case CodeGenInterface::VLT_FIXED_VA: printf("fxd_va[%d]", var->loc.vlFixedVarArg.vlfvOffset); break; #endif // !TARGET_AMD64 default: unreached(); // unexpected } printf("\n"); } // Same parameters as ICorStaticInfo::setVars(). 
void Compiler::eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars) { // Estimate number of unique vars with debug info // ALLVARSET_TP uniqueVars(AllVarSetOps::MakeEmpty(this)); for (unsigned i = 0; i < cVars; i++) { // ignore "special vars" and out of bounds vars if ((((int)vars[i].varNumber) >= 0) && (vars[i].varNumber < lclMAX_ALLSET_TRACKED)) { AllVarSetOps::AddElemD(this, uniqueVars, vars[i].varNumber); } } printf("; Variable debug info: %d live ranges, %d vars for method %s\n", cVars, AllVarSetOps::Count(this, uniqueVars), info.compFullName); for (unsigned i = 0; i < cVars; i++) { eeDispVar(&vars[i]); } } #endif // DEBUG /***************************************************************************** * * Debugging support - Line number info */ void Compiler::eeSetLIcount(unsigned count) { assert(opts.compDbgInfo); eeBoundariesCount = count; if (eeBoundariesCount) { eeBoundaries = (ICorDebugInfo::OffsetMapping*)info.compCompHnd->allocateArray(eeBoundariesCount * sizeof(eeBoundaries[0])); } else { eeBoundaries = nullptr; } } void Compiler::eeSetLIinfo(unsigned which, UNATIVE_OFFSET nativeOffset, IPmappingDscKind kind, const ILLocation& loc) { assert(opts.compDbgInfo); assert(eeBoundariesCount > 0 && eeBoundaries != nullptr); assert(which < eeBoundariesCount); eeBoundaries[which].nativeOffset = nativeOffset; eeBoundaries[which].source = (ICorDebugInfo::SourceTypes)0; switch (kind) { int source; case IPmappingDscKind::Normal: eeBoundaries[which].ilOffset = loc.GetOffset(); source = loc.IsStackEmpty() ? ICorDebugInfo::STACK_EMPTY : 0; source |= loc.IsCall() ? ICorDebugInfo::CALL_INSTRUCTION : 0; eeBoundaries[which].source = (ICorDebugInfo::SourceTypes)source; break; case IPmappingDscKind::Prolog: eeBoundaries[which].ilOffset = ICorDebugInfo::PROLOG; eeBoundaries[which].source = ICorDebugInfo::STACK_EMPTY; break; case IPmappingDscKind::Epilog: eeBoundaries[which].ilOffset = ICorDebugInfo::EPILOG; eeBoundaries[which].source = ICorDebugInfo::STACK_EMPTY; break; case IPmappingDscKind::NoMapping: eeBoundaries[which].ilOffset = ICorDebugInfo::NO_MAPPING; eeBoundaries[which].source = ICorDebugInfo::STACK_EMPTY; break; default: unreached(); } } void Compiler::eeSetLIdone() { assert(opts.compDbgInfo); #if defined(DEBUG) if (verbose || opts.dspDebugInfo) { eeDispLineInfos(); } #endif // DEBUG // necessary but not sufficient condition that the 2 struct definitions overlap assert(sizeof(eeBoundaries[0]) == sizeof(ICorDebugInfo::OffsetMapping)); info.compCompHnd->setBoundaries(info.compMethodHnd, eeBoundariesCount, (ICorDebugInfo::OffsetMapping*)eeBoundaries); eeBoundaries = nullptr; // we give up ownership after setBoundaries(); } #if defined(DEBUG) void Compiler::eeDispILOffs(IL_OFFSET offs) { printf("0x%04X", offs); } /* static */ void Compiler::eeDispSourceMappingOffs(uint32_t offs) { const char* specialOffs[] = {"EPILOG", "PROLOG", "NO_MAP"}; switch ((int)offs) // Need the cast since offs is unsigned and the case statements are comparing to signed. 
{ case ICorDebugInfo::EPILOG: case ICorDebugInfo::PROLOG: case ICorDebugInfo::NO_MAPPING: assert(DWORD(ICorDebugInfo::EPILOG) + 1 == (unsigned)ICorDebugInfo::PROLOG); assert(DWORD(ICorDebugInfo::EPILOG) + 2 == (unsigned)ICorDebugInfo::NO_MAPPING); int specialOffsNum; specialOffsNum = offs - DWORD(ICorDebugInfo::EPILOG); printf("%s", specialOffs[specialOffsNum]); break; default: eeDispILOffs(offs); break; } } /* static */ void Compiler::eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line) { printf("IL offs "); eeDispSourceMappingOffs(line->ilOffset); printf(" : 0x%08X", line->nativeOffset); if (line->source != 0) { // It seems like it should probably never be zero since ICorDebugInfo::SOURCE_TYPE_INVALID is zero. // However, the JIT has always generated this and printed "stack non-empty". printf(" ( "); if ((line->source & ICorDebugInfo::STACK_EMPTY) != 0) { printf("STACK_EMPTY "); } if ((line->source & ICorDebugInfo::CALL_INSTRUCTION) != 0) { printf("CALL_INSTRUCTION "); } if ((line->source & ICorDebugInfo::CALL_SITE) != 0) { printf("CALL_SITE "); } printf(")"); } printf("\n"); // We don't expect to see any other bits. assert((line->source & ~(ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::CALL_INSTRUCTION)) == 0); } void Compiler::eeDispLineInfos() { printf("IP mapping count : %d\n", eeBoundariesCount); // this might be zero for (unsigned i = 0; i < eeBoundariesCount; i++) { eeDispLineInfo(&eeBoundaries[i]); } printf("\n"); } #endif // DEBUG /***************************************************************************** * * ICorJitInfo wrapper functions * * In many cases here, we don't tell the VM about various unwind or EH information if * we're an altjit for an unexpected architecture. If it's not a same architecture JIT * (e.g., host AMD64, target ARM64), then VM will get confused anyway. */ void Compiler::eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize) { #ifdef DEBUG if (verbose) { printf("reserveUnwindInfo(isFunclet=%s, isColdCode=%s, unwindSize=0x%x)\n", isFunclet ? "true" : "false", isColdCode ? 
"true" : "false", unwindSize); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->reserveUnwindInfo(isFunclet, isColdCode, unwindSize); } } void Compiler::eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, CorJitFuncKind funcKind) { #ifdef DEBUG if (verbose) { printf("allocUnwindInfo(pHotCode=0x%p, pColdCode=0x%p, startOffset=0x%x, endOffset=0x%x, unwindSize=0x%x, " "pUnwindBlock=0x%p, funKind=%d", dspPtr(pHotCode), dspPtr(pColdCode), startOffset, endOffset, unwindSize, dspPtr(pUnwindBlock), funcKind); switch (funcKind) { case CORJIT_FUNC_ROOT: printf(" (main function)"); break; case CORJIT_FUNC_HANDLER: printf(" (handler)"); break; case CORJIT_FUNC_FILTER: printf(" (filter)"); break; default: printf(" (ILLEGAL)"); break; } printf(")\n"); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->allocUnwindInfo(pHotCode, pColdCode, startOffset, endOffset, unwindSize, pUnwindBlock, funcKind); } } void Compiler::eeSetEHcount(unsigned cEH) { #ifdef DEBUG if (verbose) { printf("setEHcount(cEH=%u)\n", cEH); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->setEHcount(cEH); } } void Compiler::eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause) { #ifdef DEBUG if (opts.dspEHTable) { dispOutgoingEHClause(EHnumber, *clause); } #endif // DEBUG if (info.compMatchedVM) { info.compCompHnd->setEHinfo(EHnumber, clause); } } WORD Compiler::eeGetRelocTypeHint(void* target) { if (info.compMatchedVM) { return info.compCompHnd->getRelocTypeHint(target); } else { // No hints return (WORD)-1; } } CORINFO_FIELD_HANDLE Compiler::eeFindJitDataOffs(unsigned dataOffs) { // Data offsets are marked by the fact that the low two bits are 0b01 0x1 assert(dataOffs < 0x40000000); return (CORINFO_FIELD_HANDLE)(size_t)((dataOffs << iaut_SHIFT) | iaut_DATA_OFFSET); } bool Compiler::eeIsJitDataOffs(CORINFO_FIELD_HANDLE field) { // if 'field' is a jit data offset it has to fit into a 32-bit unsigned int unsigned value = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field)); if (((CORINFO_FIELD_HANDLE)(size_t)value) != field) { return false; // some bits in the upper 32 bits were set, not a jit data offset } // Data offsets are marked by the fact that the low two bits are 0b01 return (value & iaut_MASK) == iaut_DATA_OFFSET; } int Compiler::eeGetJitDataOffs(CORINFO_FIELD_HANDLE field) { // Data offsets are marked by the fact that the low two bits are 0b01 0x1 if (eeIsJitDataOffs(field)) { unsigned dataOffs = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field)); assert(((CORINFO_FIELD_HANDLE)(size_t)dataOffs) == field); assert(dataOffs < 0x40000000); // Shift away the low two bits return (static_cast<int>(reinterpret_cast<intptr_t>(field))) >> iaut_SHIFT; } else { return -1; } } /***************************************************************************** * * ICorStaticInfo wrapper functions */ #if defined(UNIX_AMD64_ABI) #ifdef DEBUG void Compiler::dumpSystemVClassificationType(SystemVClassificationType ct) { switch (ct) { case SystemVClassificationTypeUnknown: printf("UNKNOWN"); break; case SystemVClassificationTypeStruct: printf("Struct"); break; case SystemVClassificationTypeNoClass: printf("NoClass"); break; case SystemVClassificationTypeMemory: printf("Memory"); break; case SystemVClassificationTypeInteger: printf("Integer"); break; case SystemVClassificationTypeIntegerReference: printf("IntegerReference"); break; case SystemVClassificationTypeIntegerByRef: printf("IntegerByReference"); break; case 
SystemVClassificationTypeSSE: printf("SSE"); break; default: printf("ILLEGAL"); break; } } #endif // DEBUG void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr) { bool ok = info.compCompHnd->getSystemVAmd64PassStructInRegisterDescriptor(structHnd, structPassInRegDescPtr); noway_assert(ok); #ifdef DEBUG if (verbose) { printf("**** getSystemVAmd64PassStructInRegisterDescriptor(0x%x (%s), ...) =>\n", dspPtr(structHnd), eeGetClassName(structHnd)); printf(" passedInRegisters = %s\n", dspBool(structPassInRegDescPtr->passedInRegisters)); if (structPassInRegDescPtr->passedInRegisters) { printf(" eightByteCount = %d\n", structPassInRegDescPtr->eightByteCount); for (unsigned int i = 0; i < structPassInRegDescPtr->eightByteCount; i++) { printf(" eightByte #%d -- classification: ", i); dumpSystemVClassificationType(structPassInRegDescPtr->eightByteClassifications[i]); printf(", byteSize: %d, byteOffset: %d\n", structPassInRegDescPtr->eightByteSizes[i], structPassInRegDescPtr->eightByteOffsets[i]); } } } #endif // DEBUG } #endif // UNIX_AMD64_ABI bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken) { return info.compCompHnd->tryResolveToken(resolvedToken); } bool Compiler::eeRunWithErrorTrapImp(void (*function)(void*), void* param) { return info.compCompHnd->runWithErrorTrap(function, param); } bool Compiler::eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param) { return info.compCompHnd->runWithSPMIErrorTrap(function, param); } /***************************************************************************** * * Utility functions */ #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(FEATURE_TRACELOGGING) /*****************************************************************************/ // static helper names - constant array const char* jitHlpFuncTable[CORINFO_HELP_COUNT] = { #define JITHELPER(code, pfnHelper, sig) #code, #define DYNAMICJITHELPER(code, pfnHelper, sig) #code, #include "jithelpers.h" }; /***************************************************************************** * * Filter wrapper to handle exception filtering. * On Unix compilers don't support SEH. */ struct FilterSuperPMIExceptionsParam_ee_il { Compiler* pThis; Compiler::Info* pJitInfo; CORINFO_FIELD_HANDLE field; CORINFO_METHOD_HANDLE method; CORINFO_CLASS_HANDLE clazz; const char** classNamePtr; const char* fieldOrMethodOrClassNamePtr; EXCEPTION_POINTERS exceptionPointers; }; const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE method, const char** classNamePtr) { if (eeGetHelperNum(method) != CORINFO_HELP_UNDEF) { if (classNamePtr != nullptr) { *classNamePtr = "HELPER"; } CorInfoHelpFunc ftnNum = eeGetHelperNum(method); const char* name = info.compCompHnd->getHelperName(ftnNum); // If it's something unknown from a RET VM, or from SuperPMI, then use our own helper name table. 
if ((strcmp(name, "AnyJITHelper") == 0) || (strcmp(name, "Yickish helper name") == 0)) { if ((unsigned)ftnNum < CORINFO_HELP_COUNT) { name = jitHlpFuncTable[ftnNum]; } } return name; } if (eeIsNativeMethod(method)) { if (classNamePtr != nullptr) { *classNamePtr = "NATIVE"; } method = eeGetMethodHandleForNative(method); } FilterSuperPMIExceptionsParam_ee_il param; param.pThis = this; param.pJitInfo = &info; param.method = method; param.classNamePtr = classNamePtr; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_ee_il>( [](FilterSuperPMIExceptionsParam_ee_il* pParam) { pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getMethodName(pParam->method, pParam->classNamePtr); }, &param); if (!success) { if (param.classNamePtr != nullptr) { *(param.classNamePtr) = "hackishClassName"; } param.fieldOrMethodOrClassNamePtr = "hackishMethodName"; } return param.fieldOrMethodOrClassNamePtr; } const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE field, const char** classNamePtr) { FilterSuperPMIExceptionsParam_ee_il param; param.pThis = this; param.pJitInfo = &info; param.field = field; param.classNamePtr = classNamePtr; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_ee_il>( [](FilterSuperPMIExceptionsParam_ee_il* pParam) { pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getFieldName(pParam->field, pParam->classNamePtr); }, &param); if (!success) { param.fieldOrMethodOrClassNamePtr = "hackishFieldName"; } return param.fieldOrMethodOrClassNamePtr; } const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd) { FilterSuperPMIExceptionsParam_ee_il param; param.pThis = this; param.pJitInfo = &info; param.clazz = clsHnd; bool success = eeRunWithSPMIErrorTrap<FilterSuperPMIExceptionsParam_ee_il>( [](FilterSuperPMIExceptionsParam_ee_il* pParam) { pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getClassName(pParam->clazz); }, &param); if (!success) { param.fieldOrMethodOrClassNamePtr = "hackishClassName"; } return param.fieldOrMethodOrClassNamePtr; } #endif // DEBUG || FEATURE_JIT_METHOD_PERF #ifdef DEBUG const WCHAR* Compiler::eeGetCPString(size_t strHandle) { #ifdef HOST_UNIX return nullptr; #else char buff[512 + sizeof(CORINFO_String)]; // make this bulletproof, so it works even if we are wrong. if (ReadProcessMemory(GetCurrentProcess(), (void*)strHandle, buff, 4, nullptr) == 0) { return (nullptr); } CORINFO_String* asString = *((CORINFO_String**)strHandle); if (ReadProcessMemory(GetCurrentProcess(), asString, buff, sizeof(buff), nullptr) == 0) { return (nullptr); } if (asString->stringLen >= 255 || asString->chars[asString->stringLen] != 0) { return nullptr; } return (WCHAR*)(asString->chars); #endif // HOST_UNIX } #endif // DEBUG
1
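The eeFindJitDataOffs / eeIsJitDataOffs / eeGetJitDataOffs trio above packs a data-section offset into a CORINFO_FIELD_HANDLE by shifting the offset up and tagging the low two bits with 0b01, so the same handle type can carry either a real (aligned, low-bits-zero) VM pointer or a small integer. A minimal standalone sketch of that tagging scheme, with hypothetical kTag* constants standing in for the JIT's iaut_* values:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the JIT's iaut_* constants: the tag 0b01 in the
// low two bits marks "this handle is a data offset, not a pointer".
constexpr uintptr_t kTagShift = 2;
constexpr uintptr_t kTagMask  = 0x3;
constexpr uintptr_t kTagData  = 0x1;

using Handle = void*;

// Pack a small offset into a tagged handle (mirrors eeFindJitDataOffs).
Handle packDataOffs(unsigned offs)
{
    assert(offs < 0x40000000); // must survive the two-bit shift
    return reinterpret_cast<Handle>((uintptr_t(offs) << kTagShift) | kTagData);
}

// Test for the tag (mirrors eeIsJitDataOffs).
bool isDataOffs(Handle h)
{
    return (reinterpret_cast<uintptr_t>(h) & kTagMask) == kTagData;
}

// Unpack a tagged handle, or return -1 for an ordinary pointer handle
// (mirrors eeGetJitDataOffs).
int getDataOffs(Handle h)
{
    return isDataOffs(h) ? int(reinterpret_cast<uintptr_t>(h) >> kTagShift) : -1;
}

int main()
{
    Handle tagged = packDataOffs(123);
    assert(isDataOffs(tagged) && getDataOffs(tagged) == 123);

    int local = 0; // a real pointer is at least 4-byte aligned, so its low bits are 0b00
    assert(!isDataOffs(&local) && getDataOffs(&local) == -1);
    printf("offset round-trips as %d\n", getDataOffs(tagged));
    return 0;
}

Unlike this sketch, the real eeIsJitDataOffs first checks that the handle's value fits in 32 bits, since only small offsets are ever encoded this way; anything with upper bits set must be a genuine handle.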
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/jit/lclvars.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" #include "register_arg_convention.h" #include "jitstd/algorithm.h" #include "patchpointinfo.h" /*****************************************************************************/ #ifdef DEBUG #if DOUBLE_ALIGN /* static */ unsigned Compiler::s_lvaDoubleAlignedProcsCount = 0; #endif #endif /*****************************************************************************/ void Compiler::lvaInit() { /* We haven't allocated stack variables yet */ lvaRefCountState = RCS_INVALID; lvaGenericsContextInUse = false; lvaTrackedToVarNumSize = 0; lvaTrackedToVarNum = nullptr; lvaTrackedFixed = false; // false: We can still add new tracked variables lvaDoneFrameLayout = NO_FRAME_LAYOUT; #if !defined(FEATURE_EH_FUNCLETS) lvaShadowSPslotsVar = BAD_VAR_NUM; #endif // !FEATURE_EH_FUNCLETS lvaInlinedPInvokeFrameVar = BAD_VAR_NUM; lvaReversePInvokeFrameVar = BAD_VAR_NUM; #if FEATURE_FIXED_OUT_ARGS lvaPInvokeFrameRegSaveVar = BAD_VAR_NUM; lvaOutgoingArgSpaceVar = BAD_VAR_NUM; lvaOutgoingArgSpaceSize = PhasedVar<unsigned>(); #endif // FEATURE_FIXED_OUT_ARGS #ifdef JIT32_GCENCODER lvaLocAllocSPvar = BAD_VAR_NUM; #endif // JIT32_GCENCODER lvaNewObjArrayArgs = BAD_VAR_NUM; lvaGSSecurityCookie = BAD_VAR_NUM; #ifdef TARGET_X86 lvaVarargsBaseOfStkArgs = BAD_VAR_NUM; #endif // TARGET_X86 lvaVarargsHandleArg = BAD_VAR_NUM; lvaStubArgumentVar = BAD_VAR_NUM; lvaArg0Var = BAD_VAR_NUM; lvaMonAcquired = BAD_VAR_NUM; lvaRetAddrVar = BAD_VAR_NUM; lvaInlineeReturnSpillTemp = BAD_VAR_NUM; gsShadowVarInfo = nullptr; #if defined(FEATURE_EH_FUNCLETS) lvaPSPSym = BAD_VAR_NUM; #endif #if FEATURE_SIMD lvaSIMDInitTempVarNum = BAD_VAR_NUM; #endif // FEATURE_SIMD lvaCurEpoch = 0; structPromotionHelper = new (this, CMK_Generic) StructPromotionHelper(this); } /*****************************************************************************/ void Compiler::lvaInitTypeRef() { /* x86 args look something like this: [this ptr] [hidden return buffer] [declared arguments]* [generic context] [var arg cookie] x64 is closer to the native ABI: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* (Note: prior to .NET Framework 4.5.1 for Windows 8.1 (but not .NET Framework 4.5.1 "downlevel"), the "hidden return buffer" came before the "this ptr". Now, the "this ptr" comes first. This is different from the C++ order, where the "hidden return buffer" always comes first.) 
ARM and ARM64 are the same as the current x64 convention: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* Key difference: The var arg cookie and generic context are swapped with respect to the user arguments */ /* Set compArgsCount and compLocalsCount */ info.compArgsCount = info.compMethodInfo->args.numArgs; // Is there a 'this' pointer if (!info.compIsStatic) { info.compArgsCount++; } else { info.compThisArg = BAD_VAR_NUM; } info.compILargsCount = info.compArgsCount; #ifdef FEATURE_SIMD if (supportSIMDTypes() && (info.compRetNativeType == TYP_STRUCT)) { var_types structType = impNormStructType(info.compMethodInfo->args.retTypeClass); info.compRetType = structType; } #endif // FEATURE_SIMD // Are we returning a struct using a return buffer argument? // const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // Possibly change the compRetNativeType from TYP_STRUCT to a "primitive" type // when we are returning a struct by value and it fits in one register // if (!hasRetBuffArg && varTypeIsStruct(info.compRetNativeType)) { CORINFO_CLASS_HANDLE retClsHnd = info.compMethodInfo->args.retTypeClass; Compiler::structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, info.compCallConv, &howToReturnStruct); // We can safely widen the return type for enclosed structs. if ((howToReturnStruct == SPK_PrimitiveType) || (howToReturnStruct == SPK_EnclosingType)) { assert(returnType != TYP_UNKNOWN); assert(returnType != TYP_STRUCT); info.compRetNativeType = returnType; // ToDo: Refactor this common code sequence into its own method as it is used 4+ times if ((returnType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } } // Do we have a RetBuffArg? if (hasRetBuffArg) { info.compArgsCount++; } else { info.compRetBuffArg = BAD_VAR_NUM; } /* There is a 'hidden' cookie pushed last when the calling convention is varargs */ if (info.compIsVarArgs) { info.compArgsCount++; } // Is there an extra parameter used to pass instantiation info to // shared generic methods and shared generic struct instance methods? if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compArgsCount++; } else { info.compTypeCtxtArg = BAD_VAR_NUM; } lvaCount = info.compLocalsCount = info.compArgsCount + info.compMethodInfo->locals.numArgs; info.compILlocalsCount = info.compILargsCount + info.compMethodInfo->locals.numArgs; /* Now allocate the variable descriptor table */ if (compIsForInlining()) { lvaTable = impInlineInfo->InlinerCompiler->lvaTable; lvaCount = impInlineInfo->InlinerCompiler->lvaCount; lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; // No more stuff needs to be done. return; } lvaTableCnt = lvaCount * 2; if (lvaTableCnt < 16) { lvaTableCnt = 16; } lvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(lvaTableCnt); size_t tableSize = lvaTableCnt * sizeof(*lvaTable); memset(lvaTable, 0, tableSize); for (unsigned i = 0; i < lvaTableCnt; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. 
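// (Placement new constructs each LclVarDsc in the memory zeroed by the memset above, without allocating again.)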
} //------------------------------------------------------------------------- // Count the arguments and initialize the respective lvaTable[] entries // // First the implicit arguments //------------------------------------------------------------------------- InitVarDscInfo varDscInfo; #ifdef TARGET_X86 // x86 unmanaged calling conventions limit the number of registers supported // for accepting arguments. As a result, we need to modify the number of registers // when we emit a method with an unmanaged calling convention. switch (info.compCallConv) { case CorInfoCallConvExtension::Thiscall: // In thiscall the this parameter goes into a register. varDscInfo.Init(lvaTable, hasRetBuffArg, 1, 0); break; case CorInfoCallConvExtension::C: case CorInfoCallConvExtension::Stdcall: case CorInfoCallConvExtension::CMemberFunction: case CorInfoCallConvExtension::StdcallMemberFunction: varDscInfo.Init(lvaTable, hasRetBuffArg, 0, 0); break; case CorInfoCallConvExtension::Managed: case CorInfoCallConvExtension::Fastcall: case CorInfoCallConvExtension::FastcallMemberFunction: default: varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); break; } #else varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); #endif lvaInitArgs(&varDscInfo); //------------------------------------------------------------------------- // Finally the local variables //------------------------------------------------------------------------- unsigned varNum = varDscInfo.varNum; LclVarDsc* varDsc = varDscInfo.varDsc; CORINFO_ARG_LIST_HANDLE localsSig = info.compMethodInfo->locals.args; for (unsigned i = 0; i < info.compMethodInfo->locals.numArgs; i++, varNum++, varDsc++, localsSig = info.compCompHnd->getArgNext(localsSig)) { CORINFO_CLASS_HANDLE typeHnd; CorInfoTypeWithMod corInfoTypeWithMod = info.compCompHnd->getArgType(&info.compMethodInfo->locals, localsSig, &typeHnd); CorInfoType corInfoType = strip(corInfoTypeWithMod); lvaInitVarDsc(varDsc, varNum, corInfoType, typeHnd, localsSig, &info.compMethodInfo->locals); if ((corInfoTypeWithMod & CORINFO_TYPE_MOD_PINNED) != 0) { if ((corInfoType == CORINFO_TYPE_CLASS) || (corInfoType == CORINFO_TYPE_BYREF)) { JITDUMP("Setting lvPinned for V%02u\n", varNum); varDsc->lvPinned = 1; } else { JITDUMP("Ignoring pin for non-GC type V%02u\n", varNum); } } varDsc->lvOnFrame = true; // The final home for this local variable might be our local stack frame if (corInfoType == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->locals, localsSig); lvaSetClass(varNum, clsHnd); } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varNum); varDsc->lvHasLdAddrOp = 1; // todo: Why does it apply only to non-structs? // if (!varTypeIsStruct(varDsc) && !varTypeIsSIMD(varDsc)) { lvaSetVarAddrExposed(varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } } if ( // If there already exist unsafe buffers, don't mark more structs as unsafe // as that will cause them to be placed along with the real unsafe buffers, // unnecessarily exposing them to overruns. This can affect GS tests which // intentionally do buffer-overruns. 
!getNeedsGSSecurityCookie() && // GS checks require the stack to be re-ordered, which can't be done with EnC !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25)) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; for (unsigned i = 0; i < lvaCount; i++) { if ((lvaTable[i].lvType == TYP_STRUCT) && compStressCompile(STRESS_GENERIC_VARN, 60)) { lvaTable[i].lvIsUnsafeBuffer = true; } } } if (getNeedsGSSecurityCookie()) { // Ensure that there will be at least one stack variable since // we require that the GSCookie does not have a 0 stack offset. unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy")); LclVarDsc* gsCookieDummy = lvaGetDesc(dummy); gsCookieDummy->lvType = TYP_INT; gsCookieDummy->lvIsTemp = true; // It is not alive at all, set the flag to prevent zero-init. lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } // Allocate the lvaOutgoingArgSpaceVar now because we can run into problems in the // emitter when the varNum is greater that 32767 (see emitLclVarAddr::initLclVarAddr) lvaAllocOutgoingArgSpaceVar(); #ifdef DEBUG if (verbose) { lvaTableDump(INITIAL_FRAME_LAYOUT); } #endif } /*****************************************************************************/ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) { compArgSize = 0; #if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) // Prespill all argument regs on to stack in case of Arm when under profiler. if (compIsProfilerHookNeeded()) { codeGen->regSet.rsMaskPreSpillRegArg |= RBM_ARG_REGS; } #endif //---------------------------------------------------------------------- /* Is there a "this" pointer ? */ lvaInitThisPtr(varDscInfo); unsigned numUserArgsToSkip = 0; unsigned numUserArgs = info.compMethodInfo->args.numArgs; #if !defined(TARGET_ARM) if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { // If we are a native instance method, handle the first user arg // (the unmanaged this parameter) and then handle the hidden // return buffer parameter. 
assert(numUserArgs >= 1); lvaInitUserArgs(varDscInfo, 0, 1); numUserArgsToSkip++; numUserArgs--; lvaInitRetBuffArg(varDscInfo, false); } else #endif { /* If we have a hidden return-buffer parameter, that comes here */ lvaInitRetBuffArg(varDscInfo, true); } //====================================================================== #if USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //------------------------------------------------------------------------- // Now walk the function signature for the explicit user arguments //------------------------------------------------------------------------- lvaInitUserArgs(varDscInfo, numUserArgsToSkip, numUserArgs); #if !USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //---------------------------------------------------------------------- // We have set info.compArgsCount in compCompile() noway_assert(varDscInfo->varNum == info.compArgsCount); assert(varDscInfo->intRegArgNum <= MAX_REG_ARG); codeGen->intRegState.rsCalleeRegArgCount = varDscInfo->intRegArgNum; codeGen->floatRegState.rsCalleeRegArgCount = varDscInfo->floatRegArgNum; #if FEATURE_FASTTAILCALL // Save the stack usage information // We can get register usage information using codeGen->intRegState and // codeGen->floatRegState info.compArgStackSize = varDscInfo->stackArgSize; #endif // FEATURE_FASTTAILCALL // The total argument size must be aligned. noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0); #ifdef TARGET_X86 /* We can not pass more than 2^16 dwords as arguments as the "ret" instruction can only pop 2^16 arguments. 
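(The limit comes from the encoding: x86 "ret imm16" takes only a 16-bit immediate byte count, so a larger argument area cannot be popped by the callee in one instruction.)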
Could be handled correctly but it will be very difficult for fully interruptible code */ if (compArgSize != (size_t)(unsigned short)compArgSize) IMPL_LIMITATION("Too many arguments for the \"ret\" instruction to pop"); #endif } /*****************************************************************************/ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo) { LclVarDsc* varDsc = varDscInfo->varDsc; if (!info.compIsStatic) { varDsc->lvIsParam = 1; varDsc->lvIsPtr = 1; lvaArg0Var = info.compThisArg = varDscInfo->varNum; noway_assert(info.compThisArg == 0); if (eeIsValueClass(info.compClassHnd)) { varDsc->lvType = TYP_BYREF; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types type = impNormStructType(info.compClassHnd, &simdBaseJitType); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(type)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); varDsc->lvExactSize = genTypeSize(type); } } #endif // FEATURE_SIMD } else { varDsc->lvType = TYP_REF; lvaSetClass(varDscInfo->varNum, info.compClassHnd); } varDsc->lvVerTypeInfo = typeInfo(); // Mark the 'this' pointer for the method varDsc->lvVerTypeInfo.SetIsThisPtr(); varDsc->lvIsRegArg = 1; noway_assert(varDscInfo->intRegArgNum == 0); varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->allocRegArg(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef DEBUG if (verbose) { printf("'this' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } /*****************************************************************************/ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg) { LclVarDsc* varDsc = varDscInfo->varDsc; bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // These two should always match noway_assert(hasRetBuffArg == varDscInfo->hasRetBufArg); if (hasRetBuffArg) { info.compRetBuffArg = varDscInfo->varNum; varDsc->lvType = TYP_BYREF; varDsc->lvIsParam = 1; varDsc->lvIsRegArg = 0; if (useFixedRetBufReg && hasFixedRetBuffReg()) { varDsc->lvIsRegArg = 1; varDsc->SetArgReg(theFixedRetBuffReg()); } else if (varDscInfo->canEnreg(TYP_INT)) { varDsc->lvIsRegArg = 1; unsigned retBuffArgNum = varDscInfo->allocRegArg(TYP_INT); varDsc->SetArgReg(genMapIntRegArgNumToRegNum(retBuffArgNum)); } #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsSIMD(info.compRetType)) { varDsc->lvSIMDType = true; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(info.compMethodInfo->args.retTypeClass, &varDsc->lvExactSize); varDsc->SetSimdBaseJitType(simdBaseJitType); assert(varDsc->GetSimdBaseType() != TYP_UNKNOWN); } #endif // FEATURE_SIMD assert(!varDsc->lvIsRegArg || isValidIntArgReg(varDsc->GetArgReg())); #ifdef DEBUG if (varDsc->lvIsRegArg && verbose) { printf("'__retBuf' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif /* Update the total argument size, count and varDsc */ compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } //----------------------------------------------------------------------------- // lvaInitUserArgs: // 
Initialize local var descriptions for incoming user arguments // // Arguments: // varDscInfo - the local var descriptions // skipArgs - the number of user args to skip processing. // takeArgs - the number of user args to process (after skipping skipArgs number of args) // void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs) { //------------------------------------------------------------------------- // Walk the function signature for the explicit arguments //------------------------------------------------------------------------- #if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs if (info.compIsVarArgs) { varDscInfo->maxIntRegArgNum = varDscInfo->intRegArgNum; } #elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On System V type environment the float registers are not indexed together with the int ones. varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum; #endif // TARGET* CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; const unsigned argSigLen = info.compMethodInfo->args.numArgs; // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs)); // If there are no user args or less than skipArgs args, return here since there's no work to do. if (numUserArgs <= 0) { return; } #ifdef TARGET_ARM regMaskTP doubleAlignMask = RBM_NONE; #endif // TARGET_ARM // Skip skipArgs arguments from the signature. for (unsigned i = 0; i < skipArgs; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } // Process each user arg. for (unsigned i = 0; i < numUserArgs; i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst)) { LclVarDsc* varDsc = varDscInfo->varDsc; CORINFO_CLASS_HANDLE typeHnd = nullptr; CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(&info.compMethodInfo->args, argLst, &typeHnd); varDsc->lvIsParam = 1; lvaInitVarDsc(varDsc, varDscInfo->varNum, strip(corInfoType), typeHnd, argLst, &info.compMethodInfo->args); if (strip(corInfoType) == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->args, argLst); lvaSetClass(varDscInfo->varNum, clsHnd); } // For ARM, ARM64, and AMD64 varargs, all arguments go in integer registers var_types argType = mangleVarArgsType(varDsc->TypeGet()); var_types origArgType = argType; // ARM softfp calling convention should affect only the floating point arguments. // Otherwise there appear too many surplus pre-spills and other memory operations // with the associated locations . bool isSoftFPPreSpill = opts.compUseSoftFP && varTypeIsFloating(varDsc->TypeGet()); unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); unsigned cSlots = (argSize + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; // the total number of slots of this argument bool isHfaArg = false; var_types hfaType = TYP_UNDEF; // Methods that use VarArg or SoftFP cannot have HFA arguments except // Native varargs on arm64 unix use the regular calling convention. 
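// (An HFA, a "homogeneous floating-point aggregate", is a struct whose fields all have the same
// floating-point or SIMD type; the ARM and ARM64 ABIs pass such structs in floating-point registers.)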
if (((TargetOS::IsUnix && TargetArchitecture::IsArm64) || !info.compIsVarArgs) && !opts.compUseSoftFP) { // If the argType is a struct, then check if it is an HFA if (varTypeIsStruct(argType)) { // hfaType is set to float, double, or SIMD type if it is an HFA, otherwise TYP_UNDEF hfaType = GetHfaType(typeHnd); isHfaArg = varTypeIsValidHfaType(hfaType); } } else if (info.compIsVarArgs) { // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. if (TargetOS::IsUnix) { NYI("InitUserArgs for Vararg callee is not yet implemented on non Windows targets."); } } if (isHfaArg) { // We have an HFA argument, so from here on out treat the type as a float, double, or vector. // The orginal struct type is available by using origArgType. // We also update the cSlots to be the number of float/double/vector fields in the HFA. argType = hfaType; // TODO-Cleanup: remove this asignment and mark `argType` as const. varDsc->SetHfaType(hfaType); cSlots = varDsc->lvHfaSlots(); } // The number of slots that must be enregistered if we are to consider this argument enregistered. // This is normally the same as cSlots, since we normally either enregister the entire object, // or none of it. For structs on ARM, however, we only need to enregister a single slot to consider // it enregistered, as long as we can split the rest onto the stack. unsigned cSlotsToEnregister = cSlots; #if defined(TARGET_ARM64) if (compFeatureArgSplit()) { // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte // struct is split between register r7 and virtual stack slot s[0] // We will only do this for calls to vararg methods on Windows Arm64 // // !!This does not affect the normal arm64 calling convention or Unix Arm64!! if (this->info.compIsVarArgs && argType == TYP_STRUCT) { if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register !varDscInfo->canEnreg(TYP_INT, cSlots)) // The end of the struct can't fit in a register { cSlotsToEnregister = 1; // Force the split } } } #endif // defined(TARGET_ARM64) #ifdef TARGET_ARM // On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers. // But we pre-spill user arguments in varargs methods and structs. // unsigned cAlign; bool preSpill = info.compIsVarArgs || isSoftFPPreSpill; switch (origArgType) { case TYP_STRUCT: assert(varDsc->lvSize() == argSize); cAlign = varDsc->lvStructDoubleAlign ? 2 : 1; // HFA arguments go on the stack frame. They don't get spilled in the prolog like struct // arguments passed in the integer registers but get homed immediately after the prolog. if (!isHfaArg) { // TODO-Arm32-Windows: vararg struct should be forced to split like // ARM64 above. cSlotsToEnregister = 1; // HFAs must be totally enregistered or not, but other structs can be split. preSpill = true; } break; case TYP_DOUBLE: case TYP_LONG: cAlign = 2; break; default: cAlign = 1; break; } if (isRegParamType(argType)) { compArgSize += varDscInfo->alignReg(argType, cAlign) * REGSIZE_BYTES; } if (argType == TYP_STRUCT) { // Are we going to split the struct between registers and stack? We can do that as long as // no floating-point arguments have been put on the stack. 
// // From the ARM Procedure Call Standard: // Rule C.5: "If the NCRN is less than r4 **and** the NSAA is equal to the SP," // then split the argument between registers and stack. Implication: if something // has already been spilled to the stack, then anything that would normally be // split between the core registers and the stack will be put on the stack. // Anything that follows will also be on the stack. However, if something from // floating point regs has been spilled to the stack, we can still use r0-r3 until they are full. if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register !varDscInfo->canEnreg(TYP_INT, cSlots) && // The end of the struct can't fit in a register varDscInfo->existAnyFloatStackArgs()) // There's at least one stack-based FP arg already { varDscInfo->setAllRegArgUsed(TYP_INT); // Prevent all future use of integer registers preSpill = false; // This struct won't be prespilled, since it will go on the stack } } if (preSpill) { for (unsigned ix = 0; ix < cSlots; ix++) { if (!varDscInfo->canEnreg(TYP_INT, ix + 1)) { break; } regMaskTP regMask = genMapArgNumToRegMask(varDscInfo->regArgNum(TYP_INT) + ix, TYP_INT); if (cAlign == 2) { doubleAlignMask |= regMask; } codeGen->regSet.rsMaskPreSpillRegArg |= regMask; } } #else // !TARGET_ARM #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; if (varTypeIsStruct(argType)) { assert(typeHnd != nullptr); eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); if (structDesc.passedInRegisters) { unsigned intRegCount = 0; unsigned floatRegCount = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { intRegCount++; } else if (structDesc.IsSseSlot(i)) { floatRegCount++; } else { assert(false && "Invalid eightbyte classification type."); break; } } if (intRegCount != 0 && !varDscInfo->canEnreg(TYP_INT, intRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } if (floatRegCount != 0 && !varDscInfo->canEnreg(TYP_FLOAT, floatRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } } } #endif // UNIX_AMD64_ABI #endif // !TARGET_ARM // The final home for this incoming register might be our local stack frame. // For System V platforms the final home will always be on the local stack frame. varDsc->lvOnFrame = true; bool canPassArgInRegisters = false; #if defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { canPassArgInRegisters = structDesc.passedInRegisters; } else #elif defined(TARGET_X86) if (varTypeIsStruct(argType) && isTrivialPointerSizedStruct(typeHnd)) { canPassArgInRegisters = varDscInfo->canEnreg(TYP_I_IMPL, cSlotsToEnregister); } else #endif // defined(UNIX_AMD64_ABI) { canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister); } if (canPassArgInRegisters) { /* Another register argument */ // Allocate the registers we need. allocRegArg() returns the first argument register number of the set. // For non-HFA structs, we still "try" to enregister the whole thing; it will just max out if splitting // to the stack happens. 
unsigned firstAllocatedRegArgNum = 0; #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS #if defined(UNIX_AMD64_ABI) unsigned secondAllocatedRegArgNum = 0; var_types firstEightByteType = TYP_UNDEF; var_types secondEightByteType = TYP_UNDEF; if (varTypeIsStruct(argType)) { if (structDesc.eightByteCount >= 1) { firstEightByteType = GetEightByteType(structDesc, 0); firstAllocatedRegArgNum = varDscInfo->allocRegArg(firstEightByteType, 1); } } else #endif // defined(UNIX_AMD64_ABI) { firstAllocatedRegArgNum = varDscInfo->allocRegArg(argType, cSlots); } if (isHfaArg) { // We need to save the fact that this HFA is enregistered // Note that we can have HVAs of SIMD types even if we are not recognizing intrinsics. // In that case, we won't have normalized the vector types on the varDsc, so if we have a single vector // register, we need to set the type now. Otherwise, later we'll assume this is passed by reference. if (varDsc->lvHfaSlots() != 1) { varDsc->lvIsMultiRegArg = true; } } varDsc->lvIsRegArg = 1; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 if (argType == TYP_STRUCT) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); if (cSlots == 2) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_I_IMPL)); varDsc->lvIsMultiRegArg = true; } } #elif defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType)); // If there is a second eightbyte, get a register for it too and map the arg to the reg number. if (structDesc.eightByteCount >= 2) { secondEightByteType = GetEightByteType(structDesc, 1); secondAllocatedRegArgNum = varDscInfo->allocRegArg(secondEightByteType, 1); varDsc->lvIsMultiRegArg = true; } if (secondEightByteType != TYP_UNDEF) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType)); } } #else // ARM32 if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); } #endif // ARM32 else #endif // FEATURE_MULTIREG_ARGS { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, argType)); } #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_LONG) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_INT)); } #endif // TARGET_ARM #ifdef DEBUG if (verbose) { printf("Arg #%u passed in register(s) ", varDscInfo->varNum); #if defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { // Print both registers, just to be clear if (firstEightByteType == TYP_UNDEF) { printf("firstEightByte: <not used>"); } else { printf("firstEightByte: %s", getRegName(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType))); } if (secondEightByteType == TYP_UNDEF) { printf(", secondEightByte: <not used>"); } else { printf(", secondEightByte: %s", getRegName(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType))); } } else #endif // defined(UNIX_AMD64_ABI) { bool isFloat = varTypeUsesFloatReg(argType); unsigned regArgNum = genMapRegNumToRegArgNum(varDsc->GetArgReg(), argType); for (unsigned ix = 0; ix < cSlots; ix++, regArgNum++) { if (ix > 0) { printf(","); } if (!isFloat && (regArgNum >= varDscInfo->maxIntRegArgNum)) // a struct has been split between // registers and stack { printf(" stack slots:%d", cSlots - ix); break; } #ifdef TARGET_ARM if (isFloat) { // Print register size prefix if (argType == TYP_DOUBLE) { // Print both registers, just to be clear printf("%s/%s", 
getRegName(genMapRegArgNumToRegNum(regArgNum, argType)), getRegName(genMapRegArgNumToRegNum(regArgNum + 1, argType))); // doubles take 2 slots assert(ix + 1 < cSlots); ++ix; ++regArgNum; } else { printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType))); } } else #endif // TARGET_ARM { printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType))); } } } printf("\n"); } #endif // DEBUG } // end if (canPassArgInRegisters) else { #if defined(TARGET_ARM) varDscInfo->setAllRegArgUsed(argType); if (varTypeUsesFloatReg(argType)) { varDscInfo->setAnyFloatStackArgs(); } #elif defined(TARGET_ARM64) // If we needed to use the stack in order to pass this argument then // record the fact that we have used up any remaining registers of this 'type' // This prevents any 'backfilling' from occuring on ARM64 // varDscInfo->setAllRegArgUsed(argType); #endif // TARGET_XXX #if FEATURE_FASTTAILCALL const unsigned argAlignment = eeGetArgAlignment(origArgType, (hfaType == TYP_FLOAT)); if (compMacOsArm64Abi()) { varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment); } assert((argSize % argAlignment) == 0); assert((varDscInfo->stackArgSize % argAlignment) == 0); JITDUMP("set user arg V%02u offset to %u\n", varDscInfo->varNum, varDscInfo->stackArgSize); varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += argSize; #endif // FEATURE_FASTTAILCALL } #ifdef UNIX_AMD64_ABI // The arg size is returning the number of bytes of the argument. For a struct it could return a size not a // multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE, // so round it up. compArgSize += roundUp(argSize, TARGET_POINTER_SIZE); #else // !UNIX_AMD64_ABI compArgSize += argSize; #endif // !UNIX_AMD64_ABI if (info.compIsVarArgs || isSoftFPPreSpill) { #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); #else // !TARGET_X86 // TODO-CQ: We shouldn't have to go as far as to declare these // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varDscInfo->varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varDscInfo->varNum); varDsc->lvHasLdAddrOp = 1; lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } compArgSize = GetOutgoingArgByteSize(compArgSize); #ifdef TARGET_ARM if (doubleAlignMask != RBM_NONE) { assert(RBM_ARG_REGS == 0xF); assert((doubleAlignMask & RBM_ARG_REGS) == doubleAlignMask); if (doubleAlignMask != RBM_NONE && doubleAlignMask != RBM_ARG_REGS) { // 'double aligned types' can begin only at r0 or r2 and we always expect at least two registers to be used // Note that in rare cases, we can have double-aligned structs of 12 bytes (if specified explicitly with // attributes) assert((doubleAlignMask == 0b0011) || (doubleAlignMask == 0b1100) || (doubleAlignMask == 0b0111) /* || 0b1111 is if'ed out */); // Now if doubleAlignMask is xyz1 i.e., the struct starts in r0, and we prespill r2 or r3 // but not both, then the stack would be misaligned for r0. So spill both // r2 and r3. // // ; +0 --- caller SP double aligned ---- // ; -4 r2 r3 // ; -8 r1 r1 // ; -c r0 r0 <-- misaligned. 
// ; callee saved regs bool startsAtR0 = (doubleAlignMask & 1) == 1; bool r2XorR3 = ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R2) == 0) != ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R3) == 0); if (startsAtR0 && r2XorR3) { codeGen->regSet.rsMaskPreSpillAlign = (~codeGen->regSet.rsMaskPreSpillRegArg & ~doubleAlignMask) & RBM_ARG_REGS; } } } #endif // TARGET_ARM } /*****************************************************************************/ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo) { //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compTypeCtxtArg = varDscInfo->varNum; LclVarDsc* varDsc = varDscInfo->varDsc; varDsc->lvIsParam = 1; varDsc->lvType = TYP_I_IMPL; if (varDscInfo->canEnreg(TYP_I_IMPL)) { /* Another register argument */ varDsc->lvIsRegArg = 1; varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->regArgNum(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame varDscInfo->intRegArgNum++; #ifdef DEBUG if (verbose) { printf("'GenCtxt' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif } else { // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg // returns false. varDsc->lvOnFrame = true; #if FEATURE_FASTTAILCALL varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += TARGET_POINTER_SIZE; #endif // FEATURE_FASTTAILCALL } compArgSize += TARGET_POINTER_SIZE; #if defined(TARGET_X86) if (info.compIsVarArgs) varDsc->SetStackOffset(compArgSize); #endif // TARGET_X86 varDscInfo->varNum++; varDscInfo->varDsc++; } } /*****************************************************************************/ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo) { if (info.compIsVarArgs) { lvaVarargsHandleArg = varDscInfo->varNum; LclVarDsc* varDsc = varDscInfo->varDsc; varDsc->lvType = TYP_I_IMPL; varDsc->lvIsParam = 1; #if defined(TARGET_X86) // Codegen will need it for x86 scope info. varDsc->lvImplicitlyReferenced = 1; #endif // TARGET_X86 lvaSetVarDoNotEnregister(lvaVarargsHandleArg DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); assert(mostRecentlyActivePhase == PHASE_PRE_IMPORT); // TODO-Cleanup: this is preImportation phase, why do we try to work with regs here? // Should it be just deleted? if (varDscInfo->canEnreg(TYP_I_IMPL)) { /* Another register argument */ unsigned varArgHndArgNum = varDscInfo->allocRegArg(TYP_I_IMPL); varDsc->lvIsRegArg = 1; varDsc->SetArgReg(genMapRegArgNumToRegNum(varArgHndArgNum, TYP_I_IMPL)); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef TARGET_ARM // This has to be spilled right in front of the real arguments and we have // to pre-spill all the argument registers explicitly because we only have // have symbols for the declared ones, not any potential variadic ones. 
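// (Registers in the pre-spill mask are stored to the stack in the prolog, so the declared register
// arguments and any trailing variadic stack arguments form one contiguous block in memory.)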
for (unsigned ix = varArgHndArgNum; ix < ArrLen(intArgMasks); ix++) { codeGen->regSet.rsMaskPreSpillRegArg |= intArgMasks[ix]; } #endif // TARGET_ARM #ifdef DEBUG if (verbose) { printf("'VarArgHnd' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif // DEBUG } else { // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg // returns false. varDsc->lvOnFrame = true; #if FEATURE_FASTTAILCALL varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += TARGET_POINTER_SIZE; #endif // FEATURE_FASTTAILCALL } /* Update the total argument size, count and varDsc */ compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); // Allocate a temp to point at the beginning of the args lvaVarargsBaseOfStkArgs = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs")); lvaTable[lvaVarargsBaseOfStkArgs].lvType = TYP_I_IMPL; #endif // TARGET_X86 } } /*****************************************************************************/ void Compiler::lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig) { noway_assert(varDsc == lvaGetDesc(varNum)); switch (corInfoType) { // Mark types that looks like a pointer for doing shadow-copying of // parameters if we have an unsafe buffer. // Note that this does not handle structs with pointer fields. Instead, // we rely on using the assign-groups/equivalence-groups in // gsFindVulnerableParams() to determine if a buffer-struct contains a // pointer. We could do better by having the EE determine this for us. // Note that we want to keep buffers without pointers at lower memory // addresses than buffers with pointers. case CORINFO_TYPE_PTR: case CORINFO_TYPE_BYREF: case CORINFO_TYPE_CLASS: case CORINFO_TYPE_STRING: case CORINFO_TYPE_VAR: case CORINFO_TYPE_REFANY: varDsc->lvIsPtr = 1; break; default: break; } var_types type = JITtype2varType(corInfoType); if (varTypeIsFloating(type)) { compFloatingPointUsed = true; } if (typeHnd) { unsigned cFlags = info.compCompHnd->getClassAttribs(typeHnd); // We can get typeHnds for primitive types, these are value types which only contain // a primitive. We will need the typeHnd to distinguish them, so we store it here. if ((cFlags & CORINFO_FLG_VALUECLASS) && !varTypeIsStruct(type)) { // printf("This is a struct that the JIT will treat as a primitive\n"); varDsc->lvVerTypeInfo = verMakeTypeInfo(typeHnd); } varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) varDsc->lvIsImplicitByRef = 0; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) // Set the lvType (before this point it is TYP_UNDEF). if (GlobalJitOptions::compFeatureHfa) { varDsc->SetHfaType(TYP_UNDEF); } if ((varTypeIsStruct(type))) { lvaSetStruct(varNum, typeHnd, typeHnd != nullptr, true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(varNum); } } else { varDsc->lvType = type; } if (type == TYP_BOOL) { varDsc->lvIsBoolean = true; } #ifdef DEBUG varDsc->SetStackOffset(BAD_STK_OFFS); #endif #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS } /***************************************************************************** * Returns our internal varNum for a given IL variable. * Asserts assume it is called after lvaTable[] has been set up. 
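 * Special ICorDebugInfo pseudo-numbers (VARARGS_HND_ILNUM, RETBUF_ILNUM, TYPECTXT_ILNUM) are
 * mapped to the corresponding implicit arguments, as the body below shows.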
*/ unsigned Compiler::compMapILvarNum(unsigned ILvarNum) { noway_assert(ILvarNum < info.compILlocalsCount || ILvarNum > unsigned(ICorDebugInfo::UNKNOWN_ILNUM)); unsigned varNum; if (ILvarNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM) { // The varargs cookie is the last argument in lvaTable[] noway_assert(info.compIsVarArgs); varNum = lvaVarargsHandleArg; noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM) { noway_assert(info.compRetBuffArg != BAD_VAR_NUM); varNum = info.compRetBuffArg; } else if (ILvarNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM) { noway_assert(info.compTypeCtxtArg >= 0); varNum = unsigned(info.compTypeCtxtArg); } else if (ILvarNum < info.compILargsCount) { // Parameter varNum = compMapILargNum(ILvarNum); noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum < info.compILlocalsCount) { // Local variable unsigned lclNum = ILvarNum - info.compILargsCount; varNum = info.compArgsCount + lclNum; noway_assert(!lvaTable[varNum].lvIsParam); } else { unreached(); } noway_assert(varNum < info.compLocalsCount); return varNum; } /***************************************************************************** * Returns the IL variable number given our internal varNum. * Special return values are VARG_ILNUM, RETBUF_ILNUM, TYPECTXT_ILNUM. * * Returns UNKNOWN_ILNUM if it can't be mapped. */ unsigned Compiler::compMap2ILvarNum(unsigned varNum) const { if (compIsForInlining()) { return impInlineInfo->InlinerCompiler->compMap2ILvarNum(varNum); } noway_assert(varNum < lvaCount); if (varNum == info.compRetBuffArg) { return (unsigned)ICorDebugInfo::RETBUF_ILNUM; } // Is this a varargs function? if (info.compIsVarArgs && varNum == lvaVarargsHandleArg) { return (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM; } // We create an extra argument for the type context parameter // needed for shared generic code. if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum == (unsigned)info.compTypeCtxtArg) { return (unsigned)ICorDebugInfo::TYPECTXT_ILNUM; } #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped } #endif // FEATURE_FIXED_OUT_ARGS // Now mutate varNum to remove extra parameters from the count. if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum > (unsigned)info.compTypeCtxtArg) { varNum--; } if (info.compIsVarArgs && varNum > lvaVarargsHandleArg) { varNum--; } /* Is there a hidden argument for the return buffer. Note that this code works because if the RetBuffArg is not present, compRetBuffArg will be BAD_VAR_NUM */ if (info.compRetBuffArg != BAD_VAR_NUM && varNum > info.compRetBuffArg) { varNum--; } if (varNum >= info.compLocalsCount) { return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped } return varNum; } /***************************************************************************** * Returns true if variable "varNum" may be address-exposed. */ bool Compiler::lvaVarAddrExposed(unsigned varNum) const { const LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->IsAddressExposed(); } /***************************************************************************** * Returns true iff variable "varNum" should not be enregistered (or one of several reasons). 
*/ bool Compiler::lvaVarDoNotEnregister(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->lvDoNotEnregister; } //------------------------------------------------------------------------ // lvInitializeDoNotEnregFlag: a helper to initialize `lvDoNotEnregister` flag // for locals that were created before the compiler decided its optimization level. // // Assumptions: // compEnregLocals() value is finalized and is set to false. // void Compiler::lvSetMinOptsDoNotEnreg() { JITDUMP("compEnregLocals() is false, setting doNotEnreg flag for all locals."); assert(!compEnregLocals()); for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } } /***************************************************************************** * Returns the handle to the class of the local variable varNum */ CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum) { const LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->GetStructHnd(); } //-------------------------------------------------------------------------------------------- // lvaFieldOffsetCmp - a static compare function passed to jitstd::sort() by Compiler::StructPromotionHelper; // compares fields' offsets. // // Arguments: // field1 - pointer to the first field; // field2 - pointer to the second field. // // Return value: // 0 if the fields' offsets are equal, 1 if the first field has bigger offset, -1 otherwise. // bool Compiler::lvaFieldOffsetCmp::operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2) { return field1.fldOffset < field2.fldOffset; } //------------------------------------------------------------------------ // StructPromotionHelper constructor. // // Arguments: // compiler - pointer to a compiler to get access to an allocator, compHandle etc. // Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) : compiler(compiler) , structPromotionInfo() #ifdef DEBUG , retypedFieldsMap(compiler->getAllocator(CMK_DebugOnly)) #endif // DEBUG { } //-------------------------------------------------------------------------------------------- // TryPromoteStructVar - promote struct var if it is possible and profitable. // // Arguments: // lclNum - struct number to try. // // Return value: // true if the struct var was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructVar(unsigned lclNum) { if (CanPromoteStructVar(lclNum)) { #if 0 // Often-useful debugging code: if you've narrowed down a struct-promotion problem to a single // method, this allows you to select a subset of the vars to promote (by 1-based ordinal number). static int structPromoVarNum = 0; structPromoVarNum++; if (atoi(getenv("structpromovarnumlo")) <= structPromoVarNum && structPromoVarNum <= atoi(getenv("structpromovarnumhi"))) #endif // 0 if (ShouldPromoteStructVar(lclNum)) { PromoteStructVar(lclNum); return true; } } return false; } #ifdef DEBUG //-------------------------------------------------------------------------------------------- // CheckRetypedAsScalar - check that the fldType for this fieldHnd was retyped as requested type. 
// // Arguments: // fieldHnd - the field handle; // requestedType - as which type the field was accessed; // // Notes: // For example it can happen when such struct A { struct B { long c } } is compiled and we access A.B.c, // it could look like "GT_FIELD struct B.c -> ADDR -> GT_FIELD struct A.B -> ADDR -> LCL_VAR A" , but // "GT_FIELD struct A.B -> ADDR -> LCL_VAR A" can be promoted to "LCL_VAR long A.B" and then // there is type mistmatch between "GT_FIELD struct B.c" and "LCL_VAR long A.B". // void Compiler::StructPromotionHelper::CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType) { assert(retypedFieldsMap.Lookup(fieldHnd)); assert(retypedFieldsMap[fieldHnd] == requestedType); } #endif // DEBUG //-------------------------------------------------------------------------------------------- // CanPromoteStructType - checks if the struct type can be promoted. // // Arguments: // typeHnd - struct handle to check. // // Return value: // true if the struct type can be promoted. // // Notes: // The last analyzed type is memorized to skip the check if we ask about the same time again next. // However, it was not found profitable to memorize all analyzed types in a map. // // The check initializes only nessasary fields in lvaStructPromotionInfo, // so if the promotion is rejected early than most fields will be uninitialized. // bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd) { assert(typeHnd != nullptr); if (!compiler->eeIsValueClass(typeHnd)) { // TODO-ObjectStackAllocation: Enable promotion of fields of stack-allocated objects. return false; } if (structPromotionInfo.typeHnd == typeHnd) { // Asking for the same type of struct as the last time. // Nothing need to be done. // Fall through ... return structPromotionInfo.canPromote; } // Analyze this type from scratch. structPromotionInfo = lvaStructPromotionInfo(typeHnd); // sizeof(double) represents the size of the largest primitive type that we can struct promote. // In the future this may be changing to XMM_REGSIZE_BYTES. // Note: MaxOffset is used below to declare a local array, and therefore must be a compile-time constant. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_SIMD) #if defined(TARGET_XARCH) // This will allow promotion of 4 Vector<T> fields on AVX2 or Vector256<T> on AVX, // or 8 Vector<T>/Vector128<T> fields on SSE2. 
const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * YMM_REGSIZE_BYTES; #elif defined(TARGET_ARM64) const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * FP_REGSIZE_BYTES; #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) #else // !FEATURE_SIMD const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double); #endif // !FEATURE_SIMD assert((BYTE)MaxOffset == MaxOffset); // because lvaStructFieldInfo.fldOffset is byte-sized assert((BYTE)MAX_NumOfFieldsInPromotableStruct == MAX_NumOfFieldsInPromotableStruct); // because lvaStructFieldInfo.fieldCnt is byte-sized bool containsGCpointers = false; COMP_HANDLE compHandle = compiler->info.compCompHnd; unsigned structSize = compHandle->getClassSize(typeHnd); if (structSize > MaxOffset) { return false; // struct is too large } unsigned fieldCnt = compHandle->getClassNumInstanceFields(typeHnd); if (fieldCnt == 0 || fieldCnt > MAX_NumOfFieldsInPromotableStruct) { return false; // struct must have between 1 and MAX_NumOfFieldsInPromotableStruct fields } structPromotionInfo.fieldCnt = (unsigned char)fieldCnt; DWORD typeFlags = compHandle->getClassAttribs(typeHnd); bool overlappingFields = StructHasOverlappingFields(typeFlags); if (overlappingFields) { return false; } // Don't struct promote if we have an CUSTOMLAYOUT flag on an HFA type if (StructHasCustomLayout(typeFlags) && compiler->IsHfa(typeHnd)) { return false; } #ifdef TARGET_ARM // On ARM, we have a requirement on the struct alignment; see below. unsigned structAlignment = roundUp(compHandle->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE); #endif // TARGET_ARM // If we have "Custom Layout" then we might have an explicit Size attribute // Managed C++ uses this for its structs, such C++ types will not contain GC pointers. // // The current VM implementation also incorrectly sets the CORINFO_FLG_CUSTOMLAYOUT // whenever a managed value class contains any GC pointers. // (See the comment for VMFLAG_NOT_TIGHTLY_PACKED in class.h) // // It is important to struct promote managed value classes that have GC pointers // So we compute the correct value for "CustomLayout" here // if (StructHasCustomLayout(typeFlags) && ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0)) { structPromotionInfo.customLayout = true; } if (StructHasDontDigFieldsFlagSet(typeFlags)) { return CanConstructAndPromoteField(&structPromotionInfo); } unsigned fieldsSize = 0; for (BYTE ordinal = 0; ordinal < fieldCnt; ++ordinal) { CORINFO_FIELD_HANDLE fieldHnd = compHandle->getFieldInClass(typeHnd, ordinal); structPromotionInfo.fields[ordinal] = GetFieldInfo(fieldHnd, ordinal); const lvaStructFieldInfo& fieldInfo = structPromotionInfo.fields[ordinal]; noway_assert(fieldInfo.fldOffset < structSize); if (fieldInfo.fldSize == 0) { // Not a scalar type. return false; } if ((fieldInfo.fldOffset % fieldInfo.fldSize) != 0) { // The code in Compiler::genPushArgList that reconstitutes // struct values on the stack from promoted fields expects // those fields to be at their natural alignment. return false; } if (varTypeIsGC(fieldInfo.fldType)) { containsGCpointers = true; } // The end offset for this field should never be larger than our structSize. noway_assert(fieldInfo.fldOffset + fieldInfo.fldSize <= structSize); fieldsSize += fieldInfo.fldSize; #ifdef TARGET_ARM // On ARM, for struct types that don't use explicit layout, the alignment of the struct is // at least the max alignment of its fields. We take advantage of this invariant in struct promotion, // so verify it here. 
        if (fieldInfo.fldSize > structAlignment)
        {
            // Don't promote vars whose struct types violate the invariant. (Alignment == size for primitives.)
            return false;
        }
#endif // TARGET_ARM
    }

    // If we saw any GC pointer or by-ref fields above then CORINFO_FLG_CONTAINS_GC_PTR or
    // CORINFO_FLG_BYREF_LIKE has to be set!
    noway_assert((containsGCpointers == false) ||
                 ((typeFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) != 0));

    // Check if this promoted struct contains any holes.
    assert(!overlappingFields);
    if (fieldsSize != structSize)
    {
        // If sizes do not match it means we have overlapping fields or holes.
        // Overlapping fields were rejected early, so here it can mean only holes.
        structPromotionInfo.containsHoles = true;
    }

    // Cool, this struct is promotable.
    structPromotionInfo.canPromote = true;
    return true;
}

//--------------------------------------------------------------------------------------------
// CanConstructAndPromoteField - checks if we can construct field types without asking about them directly.
//
// Arguments:
//   structPromotionInfo - struct promotion candidate information.
//
// Return value:
//   true if we can figure out the fields from available knowledge.
//
// Notes:
//   This is needed for AOT R2R compilation when we can't cross compilation bubble borders,
//   so we should not ask about fields that are not directly referenced. If we do, the VM will have
//   to emit a type check for this field type, but it does not have enough information about it.
//   As a workaround for a performance-critical corner case (a struct with 1 gcref), we try to construct
//   the field information from indirect observations.
//
bool Compiler::StructPromotionHelper::CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo)
{
    const CORINFO_CLASS_HANDLE typeHnd    = structPromotionInfo->typeHnd;
    const COMP_HANDLE          compHandle = compiler->info.compCompHnd;
    const DWORD                typeFlags  = compHandle->getClassAttribs(typeHnd);
    if (structPromotionInfo->fieldCnt != 1)
    {
        // Can't find out values for several fields.
        return false;
    }
    if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0)
    {
        // Can't find out type of a non-gc field.
        return false;
    }
    const unsigned structSize = compHandle->getClassSize(typeHnd);
    if (structSize != TARGET_POINTER_SIZE)
    {
        return false;
    }
    assert(!structPromotionInfo->containsHoles);
    assert(!structPromotionInfo->customLayout);
    lvaStructFieldInfo& fldInfo = structPromotionInfo->fields[0];

    fldInfo.fldHnd = compHandle->getFieldInClass(typeHnd, 0);

    // We should not read it anymore.
    fldInfo.fldTypeHnd = 0;

    fldInfo.fldOffset  = 0;
    fldInfo.fldOrdinal = 0;
    fldInfo.fldSize    = TARGET_POINTER_SIZE;
    fldInfo.fldType    = TYP_BYREF;

    structPromotionInfo->canPromote = true;
    return true;
}

//--------------------------------------------------------------------------------------------
// CanPromoteStructVar - checks if the struct can be promoted.
//
// Arguments:
//   lclNum - struct number to check.
//
// Return value:
//   true if the struct var can be promoted.
//
bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum)
{
    LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);

    assert(varTypeIsStruct(varDsc));
    assert(!varDsc->lvPromoted); // Don't ask again :)

    // If this lclVar is used in a SIMD intrinsic, then we don't want to struct promote it.
    // Note, however, that SIMD lclVars that are NOT used in a SIMD intrinsic may be
    // profitably promoted.
if (varDsc->lvIsUsedInSIMDIntrinsic()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsUsedInSIMDIntrinsic()\n", lclNum); return false; } // Reject struct promotion of parameters when -GS stack reordering is enabled // as we could introduce shadow copies of them. if (varDsc->lvIsParam && compiler->compGSReorderStackLayout) { JITDUMP(" struct promotion of V%02u is disabled because lvIsParam and compGSReorderStackLayout\n", lclNum); return false; } if (!compiler->lvaEnregMultiRegVars && varDsc->lvIsMultiRegArgOrRet()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsMultiRegArgOrRet()\n", lclNum); return false; } CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != NO_CLASS_HANDLE); bool canPromote = CanPromoteStructType(typeHnd); if (canPromote && varDsc->lvIsMultiRegArgOrRet()) { unsigned fieldCnt = structPromotionInfo.fieldCnt; if (fieldCnt > MAX_MULTIREG_COUNT) { canPromote = false; } #if defined(TARGET_ARMARCH) else { for (unsigned i = 0; canPromote && (i < fieldCnt); i++) { var_types fieldType = structPromotionInfo.fields[i].fldType; // Non-HFA structs are always passed in general purpose registers. // If there are any floating point fields, don't promote for now. // Likewise, since HVA structs are passed in SIMD registers // promotion of non FP or SIMD type fields is disallowed. // TODO-1stClassStructs: add support in Lowering and prolog generation // to enable promoting these types. if (varDsc->lvIsParam && (varDsc->lvIsHfa() != varTypeUsesFloatReg(fieldType))) { canPromote = false; } #if defined(FEATURE_SIMD) // If we have a register-passed struct with mixed non-opaque SIMD types (i.e. with defined fields) // and non-SIMD types, we don't currently handle that case in the prolog, so we can't promote. else if ((fieldCnt > 1) && varTypeIsStruct(fieldType) && !compiler->isOpaqueSIMDType(structPromotionInfo.fields[i].fldTypeHnd)) { canPromote = false; } #endif // FEATURE_SIMD } } #elif defined(UNIX_AMD64_ABI) else { SortStructFields(); // Only promote if the field types match the registers, unless we have a single SIMD field. SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); unsigned regCount = structDesc.eightByteCount; if ((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)) { // Allow the case of promoting a single SIMD field, even if there are multiple registers. // We will fix this up in the prolog. } else if (structPromotionInfo.fieldCnt != regCount) { canPromote = false; } else { for (unsigned i = 0; canPromote && (i < regCount); i++) { lvaStructFieldInfo* fieldInfo = &(structPromotionInfo.fields[i]); var_types fieldType = fieldInfo->fldType; // We don't currently support passing SIMD types in registers. if (varTypeIsSIMD(fieldType)) { canPromote = false; } else if (varTypeUsesFloatReg(fieldType) != (structDesc.eightByteClassifications[i] == SystemVClassificationTypeSSE)) { canPromote = false; } } } } #endif // UNIX_AMD64_ABI } return canPromote; } //-------------------------------------------------------------------------------------------- // ShouldPromoteStructVar - Should a struct var be promoted if it can be promoted? // This routine mainly performs profitability checks. Right now it also has // some correctness checks due to limitations of down-stream phases. // // Arguments: // lclNum - struct local number; // // Return value: // true if the struct should be promoted. 
//
bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum)
{
    LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);
    assert(varTypeIsStruct(varDsc));
    assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd);
    assert(structPromotionInfo.canPromote);

    bool shouldPromote = true;

    // We *can* promote; *should* we promote?
    // We should only do so if promotion has potential savings.  One source of savings
    // is if a field of the struct is accessed, since this access will be turned into
    // an access of the corresponding promoted field variable.  Even if there are no
    // field accesses, but only block-level operations on the whole struct, if the struct
    // has only one or two fields, then doing those block operations field-wise is probably faster
    // than doing a whole-variable block operation (e.g., a hardware "copy loop" on x86).
    // Struct promotion also provides the following benefits: reduce stack frame size,
    // reduce the need for zero init of stack frame and fine grained constant/copy prop.
    // Asm diffs indicate that promoting structs up to 3 fields is a net size win.
    // So if no fields are accessed independently, and there are four or more fields,
    // then do not promote.
    //
    // TODO: Ideally we would want to consider the impact of whether the struct is
    // passed as a parameter or assigned the return value of a call. Because once promoted,
    // struct copying is done by field-by-field assignment instead of a more efficient
    // rep.stos or xmm reg based copy.
    if (structPromotionInfo.fieldCnt > 3 && !varDsc->lvFieldAccessed)
    {
        JITDUMP("Not promoting promotable struct local V%02u: #fields = %d, fieldAccessed = %d.\n", lclNum,
                structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed);
        shouldPromote = false;
    }
    else if (varDsc->lvIsMultiRegRet && structPromotionInfo.containsHoles && structPromotionInfo.customLayout)
    {
        JITDUMP("Not promoting multi-reg returned struct local V%02u with holes.\n", lclNum);
        shouldPromote = false;
    }
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
    // TODO-PERF - Only do this when the LclVar is used in an argument context
    // TODO-ARM64 - HFA support should also eliminate the need for this.
    // TODO-ARM32 - HFA support should also eliminate the need for this.
    // TODO-LSRA - Currently doesn't support the passing of floating point LCL_VARS in the integer registers
    //
    // For now we currently don't promote structs with a single float field
    // Promoting it can cause us to shuffle it back and forth between the int and
    // the float regs when it is used as an argument, which is very expensive for XARCH
    //
    else if ((structPromotionInfo.fieldCnt == 1) && varTypeIsFloating(structPromotionInfo.fields[0].fldType))
    {
        JITDUMP("Not promoting promotable struct local V%02u: #fields = %d because it is a struct with "
                "single float field.\n",
                lclNum, structPromotionInfo.fieldCnt);
        shouldPromote = false;
    }
#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
    else if (varDsc->lvIsParam && !compiler->lvaIsImplicitByRefLocal(lclNum) && !varDsc->lvIsHfa())
    {
#if FEATURE_MULTIREG_STRUCT_PROMOTE
        // Is this a variable holding a value with exactly two fields passed in
        // multiple registers?
        if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
        {
            if (structPromotionInfo.containsHoles && structPromotionInfo.customLayout)
            {
                JITDUMP("Not promoting multi-reg struct local V%02u with holes.\n", lclNum);
                shouldPromote = false;
            }
            else if ((structPromotionInfo.fieldCnt != 2) &&
                     !((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)))
            {
                JITDUMP("Not promoting multireg struct local V%02u, because lvIsParam is true, #fields != 2 and it's "
                        "not a single SIMD.\n",
                        lclNum);
                shouldPromote = false;
            }
        }
        else
#endif // FEATURE_MULTIREG_STRUCT_PROMOTE

            // TODO-PERF - Implement struct promotion for incoming single-register structs.
            //             Also the implementation of jmp uses the 4 byte move to store
            //             byte parameters to the stack, so that if we have a byte field
            //             with something else occupying the same 4-byte slot, it will
            //             overwrite other fields.
            if (structPromotionInfo.fieldCnt != 1)
        {
            JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = "
                    "%d.\n",
                    lclNum, structPromotionInfo.fieldCnt);
            shouldPromote = false;
        }
    }
    else if ((lclNum == compiler->genReturnLocal) && (structPromotionInfo.fieldCnt > 1))
    {
        // TODO-1stClassStructs: a temporary solution to keep diffs small, it will be fixed later.
        shouldPromote = false;
    }
#if defined(DEBUG)
    else if (compiler->compPromoteFewerStructs(lclNum))
    {
        // Do not promote some structs, that can be promoted, to stress promoted/unpromoted moves.
        JITDUMP("Not promoting promotable struct local V%02u, because of STRESS_PROMOTE_FEWER_STRUCTS\n", lclNum);
        shouldPromote = false;
    }
#endif

    //
    // If the lvRefCnt is zero and we have a struct promoted parameter, we can end up with an extra store of
    // the incoming register into the stack frame slot.
    // In that case, we would like to avoid promotion.
    // However we haven't yet computed the lvRefCnt values so we can't do that.
    //
    CLANG_FORMAT_COMMENT_ANCHOR;

    return shouldPromote;
}

//--------------------------------------------------------------------------------------------
// SortStructFields - sort the fields according to the increasing order of the field offset.
//
// Notes:
//   This is needed because the fields need to be pushed on stack (when referenced as a struct) in offset order.
//
void Compiler::StructPromotionHelper::SortStructFields()
{
    if (!structPromotionInfo.fieldsSorted)
    {
        jitstd::sort(structPromotionInfo.fields, structPromotionInfo.fields + structPromotionInfo.fieldCnt,
                     lvaFieldOffsetCmp());
        structPromotionInfo.fieldsSorted = true;
    }
}

//--------------------------------------------------------------------------------------------
// GetFieldInfo - get struct field information.
// Arguments:
//   fieldHnd - field handle to get info for;
//   ordinal  - field ordinal.
//
// Return value:
//  field information.
//
Compiler::lvaStructFieldInfo Compiler::StructPromotionHelper::GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal)
{
    lvaStructFieldInfo fieldInfo;
    fieldInfo.fldHnd = fieldHnd;

    unsigned fldOffset  = compiler->info.compCompHnd->getFieldOffset(fieldInfo.fldHnd);
    fieldInfo.fldOffset = (BYTE)fldOffset;

    fieldInfo.fldOrdinal = ordinal;
    CorInfoType corType  = compiler->info.compCompHnd->getFieldType(fieldInfo.fldHnd, &fieldInfo.fldTypeHnd);
    fieldInfo.fldType    = JITtype2varType(corType);
    fieldInfo.fldSize    = genTypeSize(fieldInfo.fldType);

#ifdef FEATURE_SIMD
    // Check to see if this is a SIMD type.
// We will only check this if we have already found a SIMD type, which will be true if // we have encountered any SIMD intrinsics. if (compiler->usesSIMDTypes() && (fieldInfo.fldSize == 0) && compiler->isSIMDorHWSIMDClass(fieldInfo.fldTypeHnd)) { unsigned simdSize; CorInfoType simdBaseJitType = compiler->getBaseJitTypeAndSizeOfSIMDType(fieldInfo.fldTypeHnd, &simdSize); // We will only promote fields of SIMD types that fit into a SIMD register. if (simdBaseJitType != CORINFO_TYPE_UNDEF) { if ((simdSize >= compiler->minSIMDStructBytes()) && (simdSize <= compiler->maxSIMDStructBytes())) { fieldInfo.fldType = compiler->getSIMDTypeForSize(simdSize); fieldInfo.fldSize = simdSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG } } } #endif // FEATURE_SIMD if (fieldInfo.fldSize == 0) { TryPromoteStructField(fieldInfo); } return fieldInfo; } //-------------------------------------------------------------------------------------------- // TryPromoteStructField - checks that this struct's field is a struct that can be promoted as scalar type // aligned at its natural boundary. Promotes the field as a scalar if the check succeeded. // // Arguments: // fieldInfo - information about the field in the outer struct. // // Return value: // true if the internal struct was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructField(lvaStructFieldInfo& fieldInfo) { // Size of TYP_BLK, TYP_FUNC, TYP_VOID and TYP_STRUCT is zero. // Early out if field type is other than TYP_STRUCT. // This is a defensive check as we don't expect a struct to have // fields of TYP_BLK, TYP_FUNC or TYP_VOID. if (fieldInfo.fldType != TYP_STRUCT) { return false; } COMP_HANDLE compHandle = compiler->info.compCompHnd; // Do not promote if the struct field in turn has more than one field. if (compHandle->getClassNumInstanceFields(fieldInfo.fldTypeHnd) != 1) { return false; } // Do not promote if the single field is not aligned at its natural boundary within // the struct field. CORINFO_FIELD_HANDLE innerFieldHndl = compHandle->getFieldInClass(fieldInfo.fldTypeHnd, 0); unsigned innerFieldOffset = compHandle->getFieldOffset(innerFieldHndl); if (innerFieldOffset != 0) { return false; } CorInfoType fieldCorType = compHandle->getFieldType(innerFieldHndl); var_types fieldVarType = JITtype2varType(fieldCorType); unsigned fieldSize = genTypeSize(fieldVarType); // Do not promote if the field is not a primitive type, is floating-point, // or is not properly aligned. // // TODO-PERF: Structs containing a single floating-point field on Amd64 // need to be passed in integer registers. Right now LSRA doesn't support // passing of floating-point LCL_VARS in integer registers. Enabling promotion // of such structs results in an assert in lsra right now. // // TODO-CQ: Right now we only promote an actual SIMD typed field, which would cause // a nested SIMD type to fail promotion. 
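    // A sketch of the shape this path handles (hypothetical C# example, mirroring the
    // "struct A { struct B { long c } }" case described above CheckRetypedAsScalar):
    //
    //   struct Inner { public long Value; }   // single primitive field at offset 0
    //   struct Outer { public Inner I; }      // Outer.I can be retyped as a TYP_LONG scalar
    //
    // subject to the size/alignment checks below.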
    if (fieldSize == 0 || fieldSize > TARGET_POINTER_SIZE || varTypeIsFloating(fieldVarType))
    {
        JITDUMP("Promotion blocked: struct contains struct field with one field,"
                " but that field has invalid size or type.\n");
        return false;
    }

    if (fieldSize != TARGET_POINTER_SIZE)
    {
        unsigned outerFieldOffset = compHandle->getFieldOffset(fieldInfo.fldHnd);

        if ((outerFieldOffset % fieldSize) != 0)
        {
            JITDUMP("Promotion blocked: struct contains struct field with one field,"
                    " but the outer struct offset %u is not a multiple of the inner field size %u.\n",
                    outerFieldOffset, fieldSize);
            return false;
        }
    }

    // Insist that this wrapped field occupy all of its parent storage.
    unsigned innerStructSize = compHandle->getClassSize(fieldInfo.fldTypeHnd);

    if (fieldSize != innerStructSize)
    {
        JITDUMP("Promotion blocked: struct contains struct field with one field,"
                " but that field is not the same size as its parent.\n");
        return false;
    }

    // Retype the field as the type of the single field of the struct.
    // This is a hack that allows us to promote such fields before we support recursive struct promotion
    // (tracked by #10019).
    fieldInfo.fldType = fieldVarType;
    fieldInfo.fldSize = fieldSize;
#ifdef DEBUG
    retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite);
#endif // DEBUG
    return true;
}

//--------------------------------------------------------------------------------------------
// PromoteStructVar - promote struct variable.
//
// Arguments:
//   lclNum - struct local number;
//
void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum)
{
    LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum);

    // We should never see a reg-sized non-field-addressed struct here.
    assert(!varDsc->lvRegStruct);

    assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd);
    assert(structPromotionInfo.canPromote);

    varDsc->lvFieldCnt      = structPromotionInfo.fieldCnt;
    varDsc->lvFieldLclStart = compiler->lvaCount;
    varDsc->lvPromoted      = true;
    varDsc->lvContainsHoles = structPromotionInfo.containsHoles;
    varDsc->lvCustomLayout  = structPromotionInfo.customLayout;

#ifdef DEBUG
    // Don't change the source to a TYP_BLK either.
    varDsc->lvKeepType = 1;
#endif

#ifdef DEBUG
    if (compiler->verbose)
    {
        printf("\nPromoting struct local V%02u (%s):", lclNum, compiler->eeGetClassName(varDsc->GetStructHnd()));
    }
#endif

    SortStructFields();

    for (unsigned index = 0; index < structPromotionInfo.fieldCnt; ++index)
    {
        const lvaStructFieldInfo* pFieldInfo = &structPromotionInfo.fields[index];

        if (varTypeUsesFloatReg(pFieldInfo->fldType))
        {
            // Whenever we promote a struct that contains a floating point field
            // it's possible that we transition from a method that originally had only integer
            // local vars to one that also has FP vars. We have to communicate this through this flag
            // since LSRA later on will use this flag to determine whether or not to track FP register sets.
            compiler->compFloatingPointUsed = true;
        }

        // Now grab the temp for the field local.

#ifdef DEBUG
        char buf[200];
        sprintf_s(buf, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum,
                  compiler->eeGetFieldName(pFieldInfo->fldHnd), pFieldInfo->fldOffset);

        // We need to copy 'buf' as lvaGrabTemp() below caches a copy of its argument.
        size_t len  = strlen(buf) + 1;
        char*  bufp = compiler->getAllocator(CMK_DebugOnly).allocate<char>(len);
        strcpy_s(bufp, len, buf);

        if (index > 0)
        {
            noway_assert(pFieldInfo->fldOffset > (pFieldInfo - 1)->fldOffset);
        }
#endif

        // Lifetime of field locals might span multiple BBs, so they must be long lifetime temps.
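        // (This is why lvaGrabTemp is called with 'false' for its shortLifetime argument below.)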
        const unsigned varNum = compiler->lvaGrabTemp(false DEBUGARG(bufp)); // lvaGrabTemp can reallocate the
                                                                             // lvaTable, so refresh the cached
                                                                             // varDsc for lclNum.
        varDsc = compiler->lvaGetDesc(lclNum);

        LclVarDsc* fieldVarDsc       = compiler->lvaGetDesc(varNum);
        fieldVarDsc->lvType          = pFieldInfo->fldType;
        fieldVarDsc->lvExactSize     = pFieldInfo->fldSize;
        fieldVarDsc->lvIsStructField = true;
        fieldVarDsc->lvFieldHnd      = pFieldInfo->fldHnd;
        fieldVarDsc->lvFldOffset     = pFieldInfo->fldOffset;
        fieldVarDsc->lvFldOrdinal    = pFieldInfo->fldOrdinal;
        fieldVarDsc->lvParentLcl     = lclNum;
        fieldVarDsc->lvIsParam       = varDsc->lvIsParam;

        // This new local may be the first time we've seen a long typed local.
        if (fieldVarDsc->lvType == TYP_LONG)
        {
            compiler->compLongUsed = true;
        }

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
        // Reset the implicitByRef flag.
        fieldVarDsc->lvIsImplicitByRef = 0;
#endif

        // Do we have a parameter that can be enregistered?
        //
        if (varDsc->lvIsRegArg)
        {
            fieldVarDsc->lvIsRegArg = true;
            regNumber parentArgReg  = varDsc->GetArgReg();
#if FEATURE_MULTIREG_ARGS
            if (!compiler->lvaIsImplicitByRefLocal(lclNum))
            {
#ifdef UNIX_AMD64_ABI
                if (varTypeIsSIMD(fieldVarDsc) && (varDsc->lvFieldCnt == 1))
                {
                    // This SIMD typed field may be passed in multiple registers.
                    fieldVarDsc->SetArgReg(parentArgReg);
                    fieldVarDsc->SetOtherArgReg(varDsc->GetOtherArgReg());
                }
                else
#endif // UNIX_AMD64_ABI
                {
                    regNumber fieldRegNum;
                    if (index == 0)
                    {
                        fieldRegNum = parentArgReg;
                    }
                    else if (varDsc->lvIsHfa())
                    {
                        unsigned regIncrement = fieldVarDsc->lvFldOrdinal;
#ifdef TARGET_ARM
                        // TODO: Need to determine if/how to handle split args.
                        if (varDsc->GetHfaType() == TYP_DOUBLE)
                        {
                            regIncrement *= 2;
                        }
#endif // TARGET_ARM
                        fieldRegNum = (regNumber)(parentArgReg + regIncrement);
                    }
                    else
                    {
                        assert(index == 1);
                        fieldRegNum = varDsc->GetOtherArgReg();
                    }
                    fieldVarDsc->SetArgReg(fieldRegNum);
                }
            }
            else
#endif // FEATURE_MULTIREG_ARGS
            {
                fieldVarDsc->SetArgReg(parentArgReg);
            }
        }

#ifdef FEATURE_SIMD
        if (varTypeIsSIMD(pFieldInfo->fldType))
        {
            // Set size to zero so that lvaSetStruct will appropriately set the SIMD-relevant fields.
            fieldVarDsc->lvExactSize = 0;
            compiler->lvaSetStruct(varNum, pFieldInfo->fldTypeHnd, false, true);
            // We will not recursively promote this, so mark it as 'lvRegStruct' (note that we wouldn't
            // be promoting this if we didn't think it could be enregistered).
            fieldVarDsc->lvRegStruct = true;
        }
#endif // FEATURE_SIMD

#ifdef DEBUG
        // This temporary should not be converted to a double in stress mode,
        // because we introduce assigns to it after the stress conversion.
        fieldVarDsc->lvKeepType = 1;
#endif
    }
}

//--------------------------------------------------------------------------------------------
// lvaGetFieldLocal - returns the local var index for a promoted field in a promoted struct var.
//
// Arguments:
//   varDsc    - the promoted struct var descriptor;
//   fldOffset - field offset in the struct.
//
// Return value:
//   the index of the local that represents this field.
//
unsigned Compiler::lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset)
{
    noway_assert(varTypeIsStruct(varDsc));
    noway_assert(varDsc->lvPromoted);

    for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
    {
        noway_assert(lvaTable[i].lvIsStructField);
        noway_assert(lvaTable[i].lvParentLcl == (unsigned)(varDsc - lvaTable));
        if (lvaTable[i].lvFldOffset == fldOffset)
        {
            return i;
        }
    }

    // This is the not-found error return path; the caller should check for BAD_VAR_NUM
    return BAD_VAR_NUM;
}

/*****************************************************************************
 *
 *  Set the local var "varNum" as address-exposed.
 *  If this is a promoted struct, label its fields the same way.
 */
void Compiler::lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason))
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);

    varDsc->SetAddressExposed(true DEBUGARG(reason));

    if (varDsc->lvPromoted)
    {
        noway_assert(varTypeIsStruct(varDsc));

        for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
        {
            noway_assert(lvaTable[i].lvIsStructField);
            lvaTable[i].SetAddressExposed(true DEBUGARG(AddressExposedReason::PARENT_EXPOSED));
            lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::AddrExposed));
        }
    }

    lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::AddrExposed));
}

//------------------------------------------------------------------------
// lvaSetVarLiveInOutOfHandler: Set the local varNum as being live in and/or out of a handler
//
// Arguments:
//    varNum - the varNum of the local
//
void Compiler::lvaSetVarLiveInOutOfHandler(unsigned varNum)
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);

    varDsc->lvLiveInOutOfHndlr = 1;

    if (varDsc->lvPromoted)
    {
        noway_assert(varTypeIsStruct(varDsc));

        for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
        {
            noway_assert(lvaTable[i].lvIsStructField);
            lvaTable[i].lvLiveInOutOfHndlr = 1;
            // For now, only enregister an EH Var if it is a single def whose refCnt > 1.
            if (!lvaEnregEHVars || !lvaTable[i].lvSingleDefRegCandidate || lvaTable[i].lvRefCnt() <= 1)
            {
                lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler));
            }
        }
    }

    // For now, only enregister an EH Var if it is a single def whose refCnt > 1.
    if (!lvaEnregEHVars || !varDsc->lvSingleDefRegCandidate || varDsc->lvRefCnt() <= 1)
    {
        lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler));
    }
#ifdef JIT32_GCENCODER
    else if (lvaKeepAliveAndReportThis() && (varNum == info.compThisArg))
    {
        // For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer
        // in the same register for the entire method, or keep it on the stack. If it is EH-exposed, we can't ever
        // keep it in a register, since it must also be live on the stack. Therefore, we won't attempt to allocate it.
        lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler));
    }
#endif // JIT32_GCENCODER
}

/*****************************************************************************
 *
 *  Record that the local var "varNum" should not be enregistered (for one of several reasons).
*/ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)) { LclVarDsc* varDsc = lvaGetDesc(varNum); const bool wasAlreadyMarkedDoNotEnreg = (varDsc->lvDoNotEnregister == 1); varDsc->lvDoNotEnregister = 1; #ifdef DEBUG if (!wasAlreadyMarkedDoNotEnreg) { varDsc->SetDoNotEnregReason(reason); } if (verbose) { printf("\nLocal V%02u should not be enregistered because: ", varNum); } switch (reason) { case DoNotEnregisterReason::AddrExposed: JITDUMP("it is address exposed\n"); assert(varDsc->IsAddressExposed()); break; case DoNotEnregisterReason::DontEnregStructs: JITDUMP("struct enregistration is disabled\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::NotRegSizeStruct: JITDUMP("struct size does not match reg size\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::LocalField: JITDUMP("was accessed as a local field\n"); break; case DoNotEnregisterReason::VMNeedsStackAddr: JITDUMP("VM needs stack addr\n"); break; case DoNotEnregisterReason::LiveInOutOfHandler: JITDUMP("live in/out of a handler\n"); varDsc->lvLiveInOutOfHndlr = 1; break; case DoNotEnregisterReason::BlockOp: JITDUMP("written/read in a block op\n"); break; case DoNotEnregisterReason::IsStructArg: if (varTypeIsStruct(varDsc)) { JITDUMP("it is a struct arg\n"); } else { JITDUMP("it is reinterpreted as a struct arg\n"); } break; case DoNotEnregisterReason::DepField: JITDUMP("field of a dependently promoted struct\n"); assert(varDsc->lvIsStructField && (lvaGetParentPromotionType(varNum) != PROMOTION_TYPE_INDEPENDENT)); break; case DoNotEnregisterReason::NoRegVars: JITDUMP("opts.compFlags & CLFLG_REGVAR is not set\n"); assert(!compEnregLocals()); break; case DoNotEnregisterReason::MinOptsGC: JITDUMP("it is a GC Ref and we are compiling MinOpts\n"); assert(!JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())); break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: JITDUMP("it is a decomposed field of a long parameter\n"); break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: JITDUMP("pinning ref\n"); assert(varDsc->lvPinned); break; #endif case DoNotEnregisterReason::LclAddrNode: JITDUMP("LclAddrVar/Fld takes the address of this node\n"); break; case DoNotEnregisterReason::CastTakesAddr: JITDUMP("cast takes addr\n"); break; case DoNotEnregisterReason::StoreBlkSrc: JITDUMP("the local is used as store block src\n"); break; case DoNotEnregisterReason::OneAsgRetyping: JITDUMP("OneAsg forbids enreg\n"); break; case DoNotEnregisterReason::SwizzleArg: JITDUMP("SwizzleArg\n"); break; case DoNotEnregisterReason::BlockOpRet: JITDUMP("return uses a block op\n"); break; case DoNotEnregisterReason::ReturnSpCheck: JITDUMP("Used for SP check\n"); break; case DoNotEnregisterReason::SimdUserForcesDep: JITDUMP("Promoted struct used by a SIMD/HWI node\n"); break; default: unreached(); break; } #endif } // Returns true if this local var is a multireg struct. // TODO-Throughput: This does a lookup on the class handle, and in the outgoing arg context // this information is already available on the fgArgTabEntry, and shouldn't need to be // recomputed. 
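// For example (illustrative, not exhaustive): on arm64, an HFA such as a struct of four
// floats, or a 16-byte struct with two 8-byte integer fields, is passed by value in
// multiple registers and so is "multireg" here.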
// bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVarArg) { if (varTypeIsStruct(varDsc->TypeGet())) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); structPassingKind howToPassStruct; var_types type = getArgTypeForStruct(clsHnd, &howToPassStruct, isVarArg, varDsc->lvExactSize); if (howToPassStruct == SPK_ByValueAsHfa) { assert(type == TYP_STRUCT); return true; } #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) if (howToPassStruct == SPK_ByValue) { assert(type == TYP_STRUCT); return true; } #endif } return false; } /***************************************************************************** * Set the lvClass for a local variable of a struct type */ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo) { LclVarDsc* varDsc = lvaGetDesc(varNum); if (setTypeInfo) { varDsc->lvVerTypeInfo = typeInfo(TI_STRUCT, typeHnd); } // Set the type and associated info if we haven't already set it. if (varDsc->lvType == TYP_UNDEF) { varDsc->lvType = TYP_STRUCT; } if (varDsc->GetLayout() == nullptr) { ClassLayout* layout = typGetObjLayout(typeHnd); varDsc->SetLayout(layout); assert(varDsc->lvExactSize == 0); varDsc->lvExactSize = layout->GetSize(); assert(varDsc->lvExactSize != 0); if (layout->IsValueClass()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; varDsc->lvType = impNormStructType(typeHnd, &simdBaseJitType); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Mark implicit byref struct parameters if (varDsc->lvIsParam && !varDsc->lvIsStructField) { structPassingKind howToReturnStruct; getArgTypeForStruct(typeHnd, &howToReturnStruct, this->info.compIsVarArgs, varDsc->lvExactSize); if (howToReturnStruct == SPK_ByReference) { JITDUMP("Marking V%02i as a byref parameter\n", varNum); varDsc->lvIsImplicitByRef = 1; } } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #if FEATURE_SIMD if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(varDsc)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); } #endif // FEATURE_SIMD if (GlobalJitOptions::compFeatureHfa) { // For structs that are small enough, we check and set HFA element type if (varDsc->lvExactSize <= MAX_PASS_MULTIREG_BYTES) { // hfaType is set to float, double or SIMD type if it is an HFA, otherwise TYP_UNDEF var_types hfaType = GetHfaType(typeHnd); if (varTypeIsValidHfaType(hfaType)) { varDsc->SetHfaType(hfaType); // hfa variables can never contain GC pointers assert(!layout->HasGCPtr()); // The size of this struct should be evenly divisible by 4 or 8 assert((varDsc->lvExactSize % genTypeSize(hfaType)) == 0); // The number of elements in the HFA should fit into our MAX_ARG_REG_COUNT limit assert((varDsc->lvExactSize / genTypeSize(hfaType)) <= MAX_ARG_REG_COUNT); } } } } } else { #if FEATURE_SIMD assert(!varTypeIsSIMD(varDsc) || (varDsc->GetSimdBaseType() != TYP_UNKNOWN)); #endif // FEATURE_SIMD ClassLayout* layout = typGetObjLayout(typeHnd); assert(ClassLayout::AreCompatible(varDsc->GetLayout(), layout)); // Inlining could replace a canon struct type with an exact one. 
varDsc->SetLayout(layout); assert(varDsc->lvExactSize != 0); } #ifndef TARGET_64BIT bool fDoubleAlignHint = false; #ifdef TARGET_X86 fDoubleAlignHint = true; #endif if (info.compCompHnd->getClassAlignmentRequirement(typeHnd, fDoubleAlignHint) == 8) { #ifdef DEBUG if (verbose) { printf("Marking struct in V%02i with double align flag\n", varNum); } #endif varDsc->lvStructDoubleAlign = 1; } #endif // not TARGET_64BIT unsigned classAttribs = info.compCompHnd->getClassAttribs(typeHnd); varDsc->lvOverlappingFields = StructHasOverlappingFields(classAttribs); // Check whether this local is an unsafe value type and requires GS cookie protection. // GS checks require the stack to be re-ordered, which can't be done with EnC. if (unsafeValueClsCheck && (classAttribs & CORINFO_FLG_UNSAFE_VALUECLASS) && !opts.compDbgEnC) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; varDsc->lvIsUnsafeBuffer = true; } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { makeExtraStructQueries(typeHnd, 2); } #endif // DEBUG } #ifdef DEBUG //------------------------------------------------------------------------ // makeExtraStructQueries: Query the information for the given struct handle. // // Arguments: // structHandle -- The handle for the struct type we're querying. // level -- How many more levels to recurse. // void Compiler::makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level) { if (level <= 0) { return; } assert(structHandle != NO_CLASS_HANDLE); (void)typGetObjLayout(structHandle); DWORD typeFlags = info.compCompHnd->getClassAttribs(structHandle); if (StructHasDontDigFieldsFlagSet(typeFlags)) { // In AOT ReadyToRun compilation, don't query fields of types // outside of the current version bubble. return; } unsigned fieldCnt = info.compCompHnd->getClassNumInstanceFields(structHandle); impNormStructType(structHandle); #ifdef TARGET_ARMARCH GetHfaType(structHandle); #endif for (unsigned int i = 0; i < fieldCnt; i++) { CORINFO_FIELD_HANDLE fieldHandle = info.compCompHnd->getFieldInClass(structHandle, i); unsigned fldOffset = info.compCompHnd->getFieldOffset(fieldHandle); CORINFO_CLASS_HANDLE fieldClassHandle = NO_CLASS_HANDLE; CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHandle, &fieldClassHandle); var_types fieldVarType = JITtype2varType(fieldCorType); if (fieldClassHandle != NO_CLASS_HANDLE) { if (varTypeIsStruct(fieldVarType)) { makeExtraStructQueries(fieldClassHandle, level - 1); } } } } #endif // DEBUG //------------------------------------------------------------------------ // lvaSetStructUsedAsVarArg: update hfa information for vararg struct args // // Arguments: // varNum -- number of the variable // // Notes: // This only affects arm64 varargs on windows where we need to pass // hfa arguments as if they are not HFAs. // // This function should only be called if the struct is used in a varargs // method. void Compiler::lvaSetStructUsedAsVarArg(unsigned varNum) { if (GlobalJitOptions::compFeatureHfa && TargetOS::IsWindows) { #if defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); // For varargs methods incoming and outgoing arguments should not be treated // as HFA. varDsc->SetHfaType(TYP_UNDEF); #endif // defined(TARGET_ARM64) } } //------------------------------------------------------------------------ // lvaSetClass: set class information for a local var. 
//
// Arguments:
//    varNum -- number of the variable
//    clsHnd -- class handle to use in set or update
//    isExact -- true if class is known exactly
//
// Notes:
//    varNum must not already have a ref class handle.

void Compiler::lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact)
{
    noway_assert(varNum < lvaCount);

    // If we are just importing, we cannot reliably track local ref types,
    // since the jit maps CORINFO_TYPE_VAR to TYP_REF.
    if (compIsForImportOnly())
    {
        return;
    }

    // Else we should have a type handle.
    assert(clsHnd != nullptr);

    LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvType == TYP_REF);

    // We should not have any ref type information for this var.
    assert(varDsc->lvClassHnd == NO_CLASS_HANDLE);
    assert(!varDsc->lvClassIsExact);

    JITDUMP("\nlvaSetClass: setting class for V%02i to (%p) %s %s\n", varNum, dspPtr(clsHnd),
            info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : "");

    varDsc->lvClassHnd     = clsHnd;
    varDsc->lvClassIsExact = isExact;
}

//------------------------------------------------------------------------
// lvaSetClass: set class information for a local var from a tree or stack type
//
// Arguments:
//    varNum -- number of the variable. Must be a single def local
//    tree  -- tree establishing the variable's value
//    stackHnd -- handle for the type from the evaluation stack
//
// Notes:
//    Preferentially uses the tree's type, when available. Since not all
//    tree kinds can track ref types, the stack type is used as a
//    fallback. If there is no stack type, then the class is set to object.

void Compiler::lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd)
{
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);

    if (clsHnd != nullptr)
    {
        lvaSetClass(varNum, clsHnd, isExact);
    }
    else if (stackHnd != nullptr)
    {
        lvaSetClass(varNum, stackHnd);
    }
    else
    {
        lvaSetClass(varNum, impGetObjectClass());
    }
}

//------------------------------------------------------------------------
// lvaUpdateClass: update class information for a local var.
//
// Arguments:
//    varNum -- number of the variable
//    clsHnd -- class handle to use in set or update
//    isExact -- true if class is known exactly
//
// Notes:
//
//    This method models the type update rule for an assignment.
//
//    Updates currently should only happen for single-def user args or
//    locals, when we are processing the expression actually being
//    used to initialize the local (or inlined arg). The update will
//    change the local from the declared type to the type of the
//    initial value.
//
//    These updates should always *improve* what we know about the
//    type, that is making an inexact type exact, or changing a type
//    to some subtype. However the jit lacks precise type information
//    for shared code, so ensuring this is so is currently not
//    possible.

void Compiler::lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact)
{
    assert(varNum < lvaCount);

    // If we are just importing, we cannot reliably track local ref types,
    // since the jit maps CORINFO_TYPE_VAR to TYP_REF.
    if (compIsForImportOnly())
    {
        return;
    }

    // Else we should have a class handle to consider
    assert(clsHnd != nullptr);

    LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvType == TYP_REF);

    // We should already have a class
    assert(varDsc->lvClassHnd != NO_CLASS_HANDLE);

    // We should only be updating classes for single-def locals.
    assert(varDsc->lvSingleDef);

    // Now see if we should update.
    //
    // New information may not always be "better" so do some
    // simple analysis to decide if the update is worthwhile.
    const bool isNewClass   = (clsHnd != varDsc->lvClassHnd);
    bool       shouldUpdate = false;

    // Are we attempting to update the class? Only check this when we have
    // a new type and the existing class is inexact... we should not be
    // updating exact classes.
    if (!varDsc->lvClassIsExact && isNewClass)
    {
        shouldUpdate = !!info.compCompHnd->isMoreSpecificType(varDsc->lvClassHnd, clsHnd);
    }
    // Else are we attempting to update exactness?
    else if (isExact && !varDsc->lvClassIsExact && !isNewClass)
    {
        shouldUpdate = true;
    }

#if DEBUG
    if (isNewClass || (isExact != varDsc->lvClassIsExact))
    {
        JITDUMP("\nlvaUpdateClass:%s Updating class for V%02u", shouldUpdate ? "" : " NOT", varNum);
        JITDUMP(" from (%p) %s%s", dspPtr(varDsc->lvClassHnd), info.compCompHnd->getClassName(varDsc->lvClassHnd),
                varDsc->lvClassIsExact ? " [exact]" : "");
        JITDUMP(" to (%p) %s%s\n", dspPtr(clsHnd), info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : "");
    }
#endif // DEBUG

    if (shouldUpdate)
    {
        varDsc->lvClassHnd     = clsHnd;
        varDsc->lvClassIsExact = isExact;

#if DEBUG
        // Note we've modified the type...
        varDsc->lvClassInfoUpdated = true;
#endif // DEBUG
    }

    return;
}

//------------------------------------------------------------------------
// lvaUpdateClass: Update class information for a local var from a tree
//  or stack type
//
// Arguments:
//    varNum -- number of the variable. Must be a single def local
//    tree  -- tree establishing the variable's value
//    stackHnd -- handle for the type from the evaluation stack
//
// Notes:
//    Preferentially uses the tree's type, when available. Since not all
//    tree kinds can track ref types, the stack type is used as a
//    fallback.

void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd)
{
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);

    if (clsHnd != nullptr)
    {
        lvaUpdateClass(varNum, clsHnd, isExact);
    }
    else if (stackHnd != nullptr)
    {
        lvaUpdateClass(varNum, stackHnd);
    }
}

//------------------------------------------------------------------------
// lvaLclSize: returns size of a local variable, in bytes
//
// Arguments:
//    varNum -- variable to query
//
// Returns:
//    Number of bytes needed on the frame for such a local.

unsigned Compiler::lvaLclSize(unsigned varNum)
{
    assert(varNum < lvaCount);

    var_types varType = lvaTable[varNum].TypeGet();

    switch (varType)
    {
        case TYP_STRUCT:
        case TYP_BLK:
            return lvaTable[varNum].lvSize();

        case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
            // Note that this operation performs a read of a PhasedVar
            noway_assert(varNum == lvaOutgoingArgSpaceVar);
            return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
            assert(!"Unknown size");
            NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS

        default: // This must be a primitive var. Fall out of switch statement
            break;
    }
#ifdef TARGET_64BIT
    // We only need this Quirk for TARGET_64BIT
    if (lvaTable[varNum].lvQuirkToLong)
    {
        noway_assert(lvaTable[varNum].IsAddressExposed());
        return genTypeStSz(TYP_LONG) * sizeof(int); // return 8  (2 * 4)
    }
#endif
    return genTypeStSz(varType) * sizeof(int);
}

//
// Return the exact width of local variable "varNum" -- the number of bytes
// you'd need to copy in order to overwrite the value.
//
unsigned Compiler::lvaLclExactSize(unsigned varNum)
{
    assert(varNum < lvaCount);

    var_types varType = lvaTable[varNum].TypeGet();

    switch (varType)
    {
        case TYP_STRUCT:
        case TYP_BLK:
            return lvaTable[varNum].lvExactSize;

        case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
            // Note that this operation performs a read of a PhasedVar
            noway_assert(lvaOutgoingArgSpaceSize >= 0);
            noway_assert(varNum == lvaOutgoingArgSpaceVar);
            return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
            assert(!"Unknown size");
            NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS

        default: // This must be a primitive var. Fall out of switch statement
            break;
    }

    return genTypeSize(varType);
}

// getCalledCount -- get the value used to normalize weights for this method.
// If we don't have profile data then getCalledCount will return BB_UNITY_WEIGHT (100);
// otherwise it returns the number of times that profile data says the method was called.
//
// static
weight_t BasicBlock::getCalledCount(Compiler* comp)
{
    // when we don't have profile data then fgCalledCount will be BB_UNITY_WEIGHT (100)
    weight_t calledCount = comp->fgCalledCount;

    // If we haven't yet reached the place where we set up fgCalledCount, it could still be zero,
    // so return a reasonable value to use until we set it.
    //
    if (calledCount == 0)
    {
        if (comp->fgIsUsingProfileWeights())
        {
            // When we use profile data block counts we have exact counts,
            // not multiples of BB_UNITY_WEIGHT (100)
            calledCount = 1;
        }
        else
        {
            calledCount = comp->fgFirstBB->bbWeight;

            if (calledCount == 0)
            {
                calledCount = BB_UNITY_WEIGHT;
            }
        }
    }
    return calledCount;
}

// getBBWeight -- get the normalized weight of this block
weight_t BasicBlock::getBBWeight(Compiler* comp)
{
    if (this->bbWeight == BB_ZERO_WEIGHT)
    {
        return BB_ZERO_WEIGHT;
    }
    else
    {
        weight_t calledCount = getCalledCount(comp);

        // Normalize the bbWeights by multiplying by BB_UNITY_WEIGHT and dividing by the calledCount.
        //
        weight_t fullResult = this->bbWeight * BB_UNITY_WEIGHT / calledCount;

        return fullResult;
    }
}

// LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for small code.
class LclVarDsc_SmallCode_Less
{
    const LclVarDsc* m_lvaTable;
    INDEBUG(unsigned m_lvaCount;)

public:
    LclVarDsc_SmallCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount))
        : m_lvaTable(lvaTable)
#ifdef DEBUG
        , m_lvaCount(lvaCount)
#endif
    {
    }

    bool operator()(unsigned n1, unsigned n2)
    {
        assert(n1 < m_lvaCount);
        assert(n2 < m_lvaCount);

        const LclVarDsc* dsc1 = &m_lvaTable[n1];
        const LclVarDsc* dsc2 = &m_lvaTable[n2];

        // We should not be sorting untracked variables
        assert(dsc1->lvTracked);
        assert(dsc2->lvTracked);
        // We should not be sorting after registers have been allocated
        assert(!dsc1->lvRegister);
        assert(!dsc2->lvRegister);

        unsigned weight1 = dsc1->lvRefCnt();
        unsigned weight2 = dsc2->lvRefCnt();

#ifndef TARGET_ARM
        // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from
        // legacy backend. It should be enabled and verified.

        // Force integer candidates to sort above float candidates.
        const bool isFloat1 = isFloatRegType(dsc1->lvType);
        const bool isFloat2 = isFloatRegType(dsc2->lvType);

        if (isFloat1 != isFloat2)
        {
            if ((weight2 != 0) && isFloat1)
            {
                return false;
            }

            if ((weight1 != 0) && isFloat2)
            {
                return true;
            }
        }
#endif

        if (weight1 != weight2)
        {
            return weight1 > weight2;
        }

        // If the weighted ref counts are different then use their difference.
if (dsc1->lvRefCntWtd() != dsc2->lvRefCntWtd()) { return dsc1->lvRefCntWtd() > dsc2->lvRefCntWtd(); } // We have equal ref counts and weighted ref counts. // Break the tie by: // - Increasing the weight by 2 if we are a register arg. // - Increasing the weight by 0.5 if we are a GC type. // // Review: seems odd that this is mixing counts and weights. if (weight1 != 0) { if (dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc1->TypeGet())) { weight1 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight2 != 0) { if (dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc2->TypeGet())) { weight2 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight1 != weight2) { return weight1 > weight2; } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; // LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for blended code. class LclVarDsc_BlendedCode_Less { const LclVarDsc* m_lvaTable; INDEBUG(unsigned m_lvaCount;) public: LclVarDsc_BlendedCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount)) : m_lvaTable(lvaTable) #ifdef DEBUG , m_lvaCount(lvaCount) #endif { } bool operator()(unsigned n1, unsigned n2) { assert(n1 < m_lvaCount); assert(n2 < m_lvaCount); const LclVarDsc* dsc1 = &m_lvaTable[n1]; const LclVarDsc* dsc2 = &m_lvaTable[n2]; // We should not be sorting untracked variables assert(dsc1->lvTracked); assert(dsc2->lvTracked); // We should not be sorting after registers have been allocated assert(!dsc1->lvRegister); assert(!dsc2->lvRegister); weight_t weight1 = dsc1->lvRefCntWtd(); weight_t weight2 = dsc2->lvRefCntWtd(); #ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. // Force integer candidates to sort above float candidates. const bool isFloat1 = isFloatRegType(dsc1->lvType); const bool isFloat2 = isFloatRegType(dsc2->lvType); if (isFloat1 != isFloat2) { if (!Compiler::fgProfileWeightsEqual(weight2, 0) && isFloat1) { return false; } if (!Compiler::fgProfileWeightsEqual(weight1, 0) && isFloat2) { return true; } } #endif if (!Compiler::fgProfileWeightsEqual(weight1, 0) && dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight2, 0) && dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight1, weight2)) { return weight1 > weight2; } // If the weighted ref counts are different then try the unweighted ref counts. if (dsc1->lvRefCnt() != dsc2->lvRefCnt()) { return dsc1->lvRefCnt() > dsc2->lvRefCnt(); } // If one is a GC type and the other is not the GC type wins. if (varTypeIsGC(dsc1->TypeGet()) != varTypeIsGC(dsc2->TypeGet())) { return varTypeIsGC(dsc1->TypeGet()); } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; /***************************************************************************** * * Sort the local variable table by refcount and assign tracking indices. 
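 *
 *  (When optimizing for small code, the sort below uses LclVarDsc_SmallCode_Less, which
 *  keys primarily on raw ref counts; otherwise it uses LclVarDsc_BlendedCode_Less, which
 *  keys primarily on weighted ref counts -- see the comparers defined above.)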
 */
void Compiler::lvaSortByRefCount()
{
    lvaTrackedCount             = 0;
    lvaTrackedCountInSizeTUnits = 0;

#ifdef DEBUG
    VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeEmpty(this));
#endif

    if (lvaCount == 0)
    {
        return;
    }

    /* We'll sort the variables by ref count - allocate the sorted table */

    if (lvaTrackedToVarNumSize < lvaCount)
    {
        lvaTrackedToVarNumSize = lvaCount;
        lvaTrackedToVarNum     = new (getAllocator(CMK_LvaTable)) unsigned[lvaTrackedToVarNumSize];
    }

    unsigned  trackedCount = 0;
    unsigned* tracked      = lvaTrackedToVarNum;

    // Fill in the table used for sorting

    for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
    {
        LclVarDsc* varDsc = lvaGetDesc(lclNum);

        // Start by assuming that the variable will be tracked.
        varDsc->lvTracked = 1;

        if (varDsc->lvRefCnt() == 0)
        {
            // Zero ref count, make this untracked.
            varDsc->lvTracked = 0;
            varDsc->setLvRefCntWtd(0);
        }

#if !defined(TARGET_64BIT)
        if (varTypeIsLong(varDsc) && varDsc->lvPromoted)
        {
            varDsc->lvTracked = 0;
        }
#endif // !defined(TARGET_64BIT)

        // Variables that are address-exposed, and all struct locals, are never enregistered, or tracked.
        // (The struct may be promoted, and its field variables enregistered/tracked, or the VM may "normalize"
        // its type so that it's not seen by the JIT as a struct.)
        // Pinned variables may not be tracked (a condition of the GCInfo representation)
        // or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning")
        // references when using the general GC encoding.
        if (varDsc->IsAddressExposed())
        {
            varDsc->lvTracked = 0;
            assert(varDsc->lvType != TYP_STRUCT ||
                   varDsc->lvDoNotEnregister); // For structs, should have set this when we set m_addrExposed.
        }
        if (varTypeIsStruct(varDsc))
        {
            // Promoted structs will never be considered for enregistration anyway,
            // and the DoNotEnregister flag was used to indicate whether promotion was
            // independent or dependent.
            if (varDsc->lvPromoted)
            {
                varDsc->lvTracked = 0;
            }
            else if (!varDsc->IsEnregisterableType())
            {
                lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NotRegSizeStruct));
            }
            else if (varDsc->lvType == TYP_STRUCT)
            {
                if (!varDsc->lvRegStruct && !compEnregStructLocals())
                {
                    lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DontEnregStructs));
                }
                else if (varDsc->lvIsMultiRegArgOrRet())
                {
                    // Prolog and return generators do not support SIMD<->general register moves.
                    lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg));
                }
#if defined(TARGET_ARM)
                else if (varDsc->lvIsParam)
                {
                    // On arm we prespill all struct args,
                    // TODO-Arm-CQ: keep them in registers, it will need a fix
                    // to "On the ARM we will spill any incoming struct args" logic in codegencommon.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); } #endif // TARGET_ARM } } if (varDsc->lvIsStructField && (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT)) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DepField)); } if (varDsc->lvPinned) { varDsc->lvTracked = 0; #ifdef JIT32_GCENCODER lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif } if (opts.MinOpts() && !JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())) { varDsc->lvTracked = 0; lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::MinOptsGC)); } if (!compEnregLocals()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } #if defined(JIT32_GCENCODER) && defined(FEATURE_EH_FUNCLETS) if (lvaIsOriginalThisArg(lclNum) && (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) { // For x86/Linux, we need to track "this". // However we cannot have it in tracked variables, so we set "this" pointer always untracked varDsc->lvTracked = 0; } #endif // Are we not optimizing and we have exception handlers? // if so mark all args and locals "do not enregister". // if (opts.MinOpts() && compHndBBtabCount > 0) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } else { var_types type = genActualType(varDsc->TypeGet()); switch (type) { case TYP_FLOAT: case TYP_DOUBLE: case TYP_INT: case TYP_LONG: case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD case TYP_STRUCT: break; case TYP_UNDEF: case TYP_UNKNOWN: noway_assert(!"lvType not set correctly"); varDsc->lvType = TYP_INT; FALLTHROUGH; default: varDsc->lvTracked = 0; } } if (varDsc->lvTracked) { tracked[trackedCount++] = lclNum; } } // Now sort the tracked variable table by ref-count if (compCodeOpt() == SMALL_CODE) { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_SmallCode_Less(lvaTable DEBUGARG(lvaCount))); } else { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_BlendedCode_Less(lvaTable DEBUGARG(lvaCount))); } lvaTrackedCount = min((unsigned)JitConfig.JitMaxLocalsToTrack(), trackedCount); JITDUMP("Tracked variable (%u out of %u) table:\n", lvaTrackedCount, lvaCount); // Assign indices to all the variables we've decided to track for (unsigned varIndex = 0; varIndex < lvaTrackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvVarIndex = static_cast<unsigned short>(varIndex); INDEBUG(if (verbose) { gtDispLclVar(tracked[varIndex]); }) JITDUMP(" [%6s]: refCnt = %4u, refCntWtd = %6s\n", varTypeName(varDsc->TypeGet()), varDsc->lvRefCnt(), refCntWtd2str(varDsc->lvRefCntWtd())); } JITDUMP("\n"); // Mark all variables past the first 'lclMAX_TRACKED' as untracked for (unsigned varIndex = lvaTrackedCount; varIndex < trackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvTracked = 0; } // We have a new epoch, and also cache the tracked var count in terms of size_t's sufficient to hold that many bits. 
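    // Illustrative arithmetic: on a target where size_t is 64 bits, tracking 100 locals
    // yields roundUp(100, 64) / 64 = 2 size_t units of bitset storage below.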
lvaCurEpoch++; lvaTrackedCountInSizeTUnits = roundUp((unsigned)lvaTrackedCount, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeFull(this)); #endif } /***************************************************************************** * * This is called by lvaMarkLclRefs to disqualify a variable from being * considered by optAddCopies() */ void LclVarDsc::lvaDisqualifyVar() { this->lvDisqualify = true; this->lvSingleDef = false; this->lvDefStmt = nullptr; } #ifdef FEATURE_SIMD var_types LclVarDsc::GetSimdBaseType() const { CorInfoType simdBaseJitType = GetSimdBaseJitType(); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { return TYP_UNKNOWN; } return JitType2PreciseVarType(simdBaseJitType); } #endif // FEATURE_SIMD unsigned LclVarDsc::lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK. { // TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted, // where the struct itself is no longer used because all access is via its member fields. // When that happens, the struct is marked as unused and its type has been changed to // TYP_INT (to keep the GC tracking code from looking at it). // See Compiler::raAssignVars() for details. For example: // N002 ( 4, 3) [00EA067C] ------------- return struct $346 // N001 ( 3, 2) [00EA0628] ------------- lclVar struct(U) V03 loc2 // float V03.f1 (offs=0x00) -> V12 tmp7 // f8 (last use) (last use) $345 // Here, the "struct(U)" shows that the "V03 loc2" variable is unused. Not shown is that V03 // is now TYP_INT in the local variable table. It's not really unused, because it's in the tree. assert(varTypeIsStruct(lvType) || (lvType == TYP_BLK) || (lvPromoted && lvUnusedStruct)); if (lvIsParam) { assert(varTypeIsStruct(lvType)); const bool isFloatHfa = (lvIsHfa() && (GetHfaType() == TYP_FLOAT)); const unsigned argAlignment = Compiler::eeGetArgAlignment(lvType, isFloatHfa); return roundUp(lvExactSize, argAlignment); } #if defined(FEATURE_SIMD) && !defined(TARGET_64BIT) // For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. We can't do // this for arguments, which must be passed according the defined ABI. We don't want to do this for // dependently promoted struct fields, but we don't know that here. See lvaMapSimd12ToSimd16(). // (Note that for 64-bits, we are already rounding up to 16.) if (lvType == TYP_SIMD12) { assert(!lvIsParam); assert(lvExactSize == 12); return 16; } #endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT) return roundUp(lvExactSize, TARGET_POINTER_SIZE); } /********************************************************************************** * Get stack size of the varDsc. */ size_t LclVarDsc::lvArgStackSize() const { // Make sure this will have a stack size assert(!this->lvIsRegArg); size_t stackSize = 0; if (varTypeIsStruct(this)) { #if defined(WINDOWS_AMD64_ABI) // Structs are either passed by reference or can be passed by value using one pointer stackSize = TARGET_POINTER_SIZE; #elif defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // lvSize performs a roundup. stackSize = this->lvSize(); #if defined(TARGET_ARM64) if ((stackSize > TARGET_POINTER_SIZE * 2) && (!this->lvIsHfa())) { // If the size is greater than 16 bytes then it will // be passed by reference. 
stackSize = TARGET_POINTER_SIZE; } #endif // defined(TARGET_ARM64) #else // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI NYI("Unsupported target."); unreached(); #endif // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI } else { stackSize = TARGET_POINTER_SIZE; } return stackSize; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Arguments: // tree - node that uses the local, its type is checked first. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType(const GenTreeLclVarCommon* tree) const { var_types targetType = tree->gtType; var_types lclVarType = TypeGet(); if (targetType == TYP_STRUCT) { if (lclVarType == TYP_STRUCT) { assert(!tree->OperIsLocalField() && "do not expect struct local fields."); lclVarType = GetLayout()->GetRegisterType(); } targetType = lclVarType; } #ifdef DEBUG if ((targetType != TYP_UNDEF) && tree->OperIs(GT_STORE_LCL_VAR) && lvNormalizeOnStore()) { const bool phiStore = (tree->gtGetOp1()->OperIsNonPhiLocal() == false); // Ensure that the lclVar node is typed correctly, // does not apply to phi-stores because they do not produce code in the merge block. assert(phiStore || targetType == genActualType(lclVarType)); } #endif return targetType; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType() const { if (TypeGet() != TYP_STRUCT) { #if !defined(TARGET_64BIT) if (TypeGet() == TYP_LONG) { return TYP_UNDEF; } #endif return TypeGet(); } assert(m_layout != nullptr); return m_layout->GetRegisterType(); } //------------------------------------------------------------------------ // GetActualRegisterType: Determine an actual register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetActualRegisterType() const { return genActualType(GetRegisterType()); } //---------------------------------------------------------------------------------------------- // CanBeReplacedWithItsField: check if a whole struct reference could be replaced by a field. // // Arguments: // comp - the compiler instance; // // Return Value: // true if that can be replaced, false otherwise. // // Notes: // The replacement can be made only for independently promoted structs // with 1 field without holes. // bool LclVarDsc::CanBeReplacedWithItsField(Compiler* comp) const { if (!lvPromoted) { return false; } if (comp->lvaGetPromotionType(this) != Compiler::PROMOTION_TYPE_INDEPENDENT) { return false; } if (lvFieldCnt != 1) { return false; } if (lvContainsHoles) { return false; } #if defined(FEATURE_SIMD) // If we return `struct A { SIMD16 a; }` we split the struct into several fields. // In order to do that we have to have its field `a` in memory. Right now lowering cannot // handle RETURN struct(multiple registers)->SIMD16(one register), but it can be improved. 
    LclVarDsc* fieldDsc = comp->lvaGetDesc(lvFieldLclStart);
    if (varTypeIsSIMD(fieldDsc))
    {
        return false;
    }
#endif // FEATURE_SIMD

    return true;
}

//------------------------------------------------------------------------
// lvaMarkLclRefs: increment local var reference counts and more
//
// Arguments:
//     tree - some node in a tree
//     block - block that the tree node belongs to
//     stmt - stmt that the tree node belongs to
//     isRecompute - true if we should just recompute counts
//
// Notes:
//     Invoked via the MarkLocalVarsVisitor
//
//     Primarily increments the regular and weighted local var ref
//     counts for any local referred to directly by tree.
//
//     Also:
//
//     Accounts for implicit references to frame list root for
//     pinvokes that will be expanded later.
//
//     Determines if locals of TYP_BOOL can safely be considered
//     to hold only 0 or 1 or may have a broader range of true values.
//
//     Does some setup work for assertion prop, noting locals that are
//     eligible for assertion prop, single defs, and tracking which blocks
//     hold uses.
//
//     Looks for uses of generic context and sets lvaGenericsContextInUse.
//
//     In checked builds:
//
//     Verifies that local accesses are consistently typed.
//     Verifies that casts remain in bounds.

void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute)
{
    const weight_t weight = block->getBBWeight(this);

    /* Is this a call to unmanaged code? */
    if (tree->IsCall() && compMethodRequiresPInvokeFrame())
    {
        assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
        if (!opts.ShouldUsePInvokeHelpers())
        {
            /* Get the special variable descriptor */
            LclVarDsc* varDsc = lvaGetDesc(info.compLvFrameListRoot);

            /* Increment the ref counts twice */
            varDsc->incRefCnts(weight, this);
            varDsc->incRefCnts(weight, this);
        }
    }

    if (!isRecompute)
    {
        /* Is this an assignment? */
        if (tree->OperIs(GT_ASG))
        {
            GenTree* op1 = tree->AsOp()->gtOp1;
            GenTree* op2 = tree->AsOp()->gtOp2;

            /* Is this an assignment to a local variable? */
            if (op1->gtOper == GT_LCL_VAR && op2->gtType != TYP_BOOL)
            {
                /* Only simple assignments allowed for booleans */
                if (tree->gtOper != GT_ASG)
                {
                    goto NOT_BOOL;
                }

                /* Is the RHS clearly a boolean value? */
                switch (op2->gtOper)
                {
                    unsigned lclNum;

                    case GT_CNS_INT:

                        if (op2->AsIntCon()->gtIconVal == 0)
                        {
                            break;
                        }
                        if (op2->AsIntCon()->gtIconVal == 1)
                        {
                            break;
                        }

                        // Not 0 or 1, fall through ....
                        FALLTHROUGH;

                    default:

                        if (op2->OperIsCompare())
                        {
                            break;
                        }

                    NOT_BOOL:

                        lclNum = op1->AsLclVarCommon()->GetLclNum();
                        noway_assert(lclNum < lvaCount);

                        lvaTable[lclNum].lvIsBoolean = false;
                        break;
                }
            }
        }
    }

    if (tree->OperIsLocalAddr())
    {
        LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
        assert(varDsc->IsAddressExposed());
        varDsc->incRefCnts(weight, this);
        return;
    }

    if ((tree->gtOper != GT_LCL_VAR) && (tree->gtOper != GT_LCL_FLD))
    {
        return;
    }

    /* This must be a local variable reference */

    // See if this is a generics context use.
    if ((tree->gtFlags & GTF_VAR_CONTEXT) != 0)
    {
        assert(tree->OperIs(GT_LCL_VAR));
        if (!lvaGenericsContextInUse)
        {
            JITDUMP("-- generic context in use at [%06u]\n", dspTreeID(tree));
            lvaGenericsContextInUse = true;
        }
    }

    assert((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD));
    unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    /* Increment the reference counts */

    varDsc->incRefCnts(weight, this);

#ifdef DEBUG
    if (varDsc->lvIsStructField)
    {
        // If the ref count was increased for a struct field, ensure that the
        // parent struct is still promoted.
        LclVarDsc* parentStruct = lvaGetDesc(varDsc->lvParentLcl);
        assert(!parentStruct->lvUndoneStructPromotion);
    }
#endif

    if (!isRecompute)
    {
        if (lvaVarAddrExposed(lclNum))
        {
            varDsc->lvIsBoolean = false;
        }

        if (tree->gtOper == GT_LCL_FLD)
        {
            // variables that have uses inside a GT_LCL_FLD
            // cause problems, so we will disqualify them here
            varDsc->lvaDisqualifyVar();
            return;
        }

        if (fgDomsComputed && IsDominatedByExceptionalEntry(block))
        {
            SetVolatileHint(varDsc);
        }

        /* Record if the variable has a single def or not */

        if (!varDsc->lvDisqualify) // If this variable is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                /*
                   If we have one of these cases:
                       1.    We have already seen a definition (i.e. lvSingleDef is true)
                       2. or info.compInitMem is true (thus this would be the second definition)
                       3. or we have an assignment inside QMARK-COLON trees
                       4. or we have an update form of assignment (i.e. +=, -=, *=)
                   Then we must disqualify this variable for use in optAddCopies()

                   Note that all parameters start out with lvSingleDef set to true
                */
                if ((varDsc->lvSingleDef == true) || (info.compInitMem == true) || (tree->gtFlags & GTF_COLON_COND) ||
                    (tree->gtFlags & GTF_VAR_USEASG))
                {
                    varDsc->lvaDisqualifyVar();
                }
                else
                {
                    varDsc->lvSingleDef = true;
                    varDsc->lvDefStmt   = stmt;
                }
            }
            else // otherwise this is a ref of our variable
            {
                if (BlockSetOps::MayBeUninit(varDsc->lvRefBlks))
                {
                    // Lazy initialization
                    BlockSetOps::AssignNoCopy(this, varDsc->lvRefBlks, BlockSetOps::MakeEmpty(this));
                }
                BlockSetOps::AddElemD(this, varDsc->lvRefBlks, block->bbNum);
            }
        }

        if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                bool bbInALoop  = (block->bbFlags & BBF_BACKWARD_JUMP) != 0;
                bool bbIsReturn = block->bbJumpKind == BBJ_RETURN;
                // TODO: Zero-inits in LSRA are created with the below condition. But if we filter based on that
                // condition, we filter out a lot of interesting variables that would otherwise benefit from EH var
                // enregistration.
                // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem ||
                // varTypeIsGC(varDsc->TypeGet()));
                bool needsExplicitZeroInit = fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn);

                if (varDsc->lvSingleDefRegCandidate || needsExplicitZeroInit)
                {
#ifdef DEBUG
                    if (needsExplicitZeroInit)
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'Z';
                        JITDUMP("V%02u needs explicit zero init. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }
                    else
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'M';
                        JITDUMP("V%02u has multiple definitions. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }
#endif // DEBUG
                    varDsc->lvSingleDefRegCandidate           = false;
                    varDsc->lvDisqualifySingleDefRegCandidate = true;
                }
                else
                {
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
                    // TODO-CQ: If the varType needs partial callee save, conservatively do not enregister
                    // such a variable. In the future, we need to enable enregistration for such variables.
                    if (!varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
#endif
                    {
                        varDsc->lvSingleDefRegCandidate = true;
                        JITDUMP("Marking EH Var V%02u as a register candidate.\n", lclNum);
                    }
                }
            }
        }

        bool allowStructs = false;
#ifdef UNIX_AMD64_ABI
        // On System V the type of the var could be a struct type.
        allowStructs = varTypeIsStruct(varDsc);
#endif // UNIX_AMD64_ABI

        /* Variables must be used as the same type throughout the method */
        noway_assert(varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
                     genActualType(varDsc->TypeGet()) == genActualType(tree->gtType) ||
                     (tree->gtType == TYP_BYREF && varDsc->TypeGet() == TYP_I_IMPL) ||
                     (tree->gtType == TYP_I_IMPL && varDsc->TypeGet() == TYP_BYREF) || (tree->gtFlags & GTF_VAR_CAST) ||
                     (varTypeIsFloating(varDsc) && varTypeIsFloating(tree)) ||
                     (varTypeIsStruct(varDsc) == varTypeIsStruct(tree)));

        /* Remember the type of the reference */
        if (tree->gtType == TYP_UNKNOWN || varDsc->lvType == TYP_UNDEF)
        {
            varDsc->lvType = tree->gtType;
            noway_assert(genActualType(varDsc->TypeGet()) == tree->gtType); // no truncation
        }

#ifdef DEBUG
        if (tree->gtFlags & GTF_VAR_CAST)
        {
            // it should never be bigger than the variable slot
            // Trees don't store the full information about structs
            // so we can't check them.
            if (tree->TypeGet() != TYP_STRUCT)
            {
                unsigned treeSize = genTypeSize(tree->TypeGet());
                unsigned varSize  = genTypeSize(varDsc->TypeGet());
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    varSize = varDsc->lvSize();
                }

                assert(treeSize <= varSize);
            }
        }
#endif
    }
}

//------------------------------------------------------------------------
// IsDominatedByExceptionalEntry: Check whether the block is dominated by an exception entry block.
//
// Arguments:
//    block - the block to check.
//
bool Compiler::IsDominatedByExceptionalEntry(BasicBlock* block)
{
    assert(fgDomsComputed);
    return block->IsDominatedByExceptionalEntryFlag();
}

//------------------------------------------------------------------------
// SetVolatileHint: Set a local var's volatile hint.
//
// Arguments:
//    varDsc - the local variable that needs the hint.
//
void Compiler::SetVolatileHint(LclVarDsc* varDsc)
{
    varDsc->lvVolatileHint = true;
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: update local var ref counts for IR in a basic block
//
// Arguments:
//    block - the block in question
//    isRecompute - true if counts are being recomputed
//
// Notes:
//    Invokes lvaMarkLclRefs on each tree node for each
//    statement in the block.
void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute)
{
    class MarkLocalVarsVisitor final : public GenTreeVisitor<MarkLocalVarsVisitor>
    {
    private:
        BasicBlock* m_block;
        Statement*  m_stmt;
        bool        m_isRecompute;

    public:
        enum
        {
            DoPreOrder = true,
        };

        MarkLocalVarsVisitor(Compiler* compiler, BasicBlock* block, Statement* stmt, bool isRecompute)
            : GenTreeVisitor<MarkLocalVarsVisitor>(compiler), m_block(block), m_stmt(stmt), m_isRecompute(isRecompute)
        {
        }

        Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
        {
            // TODO: Stop passing isRecompute once we are sure that this assert is never hit.
            assert(!m_isRecompute);
            m_compiler->lvaMarkLclRefs(*use, m_block, m_stmt, m_isRecompute);
            return WALK_CONTINUE;
        }
    };

    JITDUMP("\n*** %s local variables in block " FMT_BB " (weight=%s)\n", isRecompute ? "recomputing" : "marking",
            block->bbNum, refCntWtd2str(block->getBBWeight(this)));

    for (Statement* const stmt : block->NonPhiStatements())
    {
        MarkLocalVarsVisitor visitor(this, block, stmt, isRecompute);
        DISPSTMT(stmt);
        visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
    }
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: enable normal ref counting, compute initial counts, sort locals table
//
// Notes:
//    Now behaves differently in minopts / debug.
Instead of actually inspecting // the IR and counting references, the jit assumes all locals are referenced // and does not sort the locals table. // // Also, when optimizing, lays the groundwork for assertion prop and more. // See details in lvaMarkLclRefs. void Compiler::lvaMarkLocalVars() { JITDUMP("\n*************** In lvaMarkLocalVars()"); // If we have direct pinvokes, verify the frame list root local was set up properly if (compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) { noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount); } } #if !defined(FEATURE_EH_FUNCLETS) // Grab space for exception handling if (ehNeedsShadowSPslots()) { // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) // ie. the offset of the end-of-last-executed-filter unsigned slotsNeeded = 1; unsigned handlerNestingLevel = ehMaxHndNestingCount; if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; slotsNeeded += handlerNestingLevel; // For a filter (which can be active at the same time as a catch/finally handler) slotsNeeded++; // For zero-termination of the shadow-Stack-pointer chain slotsNeeded++; lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); LclVarDsc* shadowSPslotsVar = lvaGetDesc(lvaShadowSPslotsVar); shadowSPslotsVar->lvType = TYP_BLK; shadowSPslotsVar->lvExactSize = (slotsNeeded * TARGET_POINTER_SIZE); } #endif // !FEATURE_EH_FUNCLETS // PSPSym and LocAllocSPvar are not used by the CoreRT ABI if (!IsTargetAbi(CORINFO_CORERT_ABI)) { #if defined(FEATURE_EH_FUNCLETS) if (ehNeedsPSPSym()) { lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); LclVarDsc* lclPSPSym = lvaGetDesc(lvaPSPSym); lclPSPSym->lvType = TYP_I_IMPL; lvaSetVarDoNotEnregister(lvaPSPSym DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER // LocAllocSPvar is only required by the implicit frame layout expected by the VM on x86. Whether // a function contains a Localloc is conveyed in the GC information, in the InfoHdrSmall.localloc // field. The function must have an EBP frame. Then, the VM finds the LocAllocSP slot by assuming // the following stack layout: // // -- higher addresses -- // saved EBP <-- EBP points here // other callee-saved registers // InfoHdrSmall.savedRegsCountExclFP specifies this size // optional GS cookie // InfoHdrSmall.security is 1 if this exists // LocAllocSP slot // -- lower addresses -- // // See also eetwain.cpp::GetLocallocSPOffset() and its callers. if (compLocallocUsed) { lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar")); LclVarDsc* locAllocSPvar = lvaGetDesc(lvaLocAllocSPvar); locAllocSPvar->lvType = TYP_I_IMPL; } #endif // JIT32_GCENCODER } // Ref counting is now enabled normally. lvaRefCountState = RCS_NORMAL; #if defined(DEBUG) const bool setSlotNumbers = true; #else const bool setSlotNumbers = opts.compScopeInfo && (info.compVarScopesCount > 0); #endif // defined(DEBUG) const bool isRecompute = false; lvaComputeRefCounts(isRecompute, setSlotNumbers); // If we don't need precise reference counts, e.g. we're not optimizing, we're done. if (!PreciseRefCountsRequired()) { return; } const bool reportParamTypeArg = lvaReportParamTypeArg(); // Update bookkeeping on the generic context. 
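// A sketch of the cases handled below (hypothetical examples): a generic
// instance method whose context comes from 'this' marks V00 implicitly
// referenced, while a method with a hidden type context argument marks
// that argument instead; in both cases only when the context must be
// reported.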
if (lvaKeepAliveAndReportThis()) { lvaGetDesc(0u)->lvImplicitlyReferenced = reportParamTypeArg; } else if (lvaReportParamTypeArg()) { // We should have a context arg. assert(info.compTypeCtxtArg != (int)BAD_VAR_NUM); lvaGetDesc(info.compTypeCtxtArg)->lvImplicitlyReferenced = reportParamTypeArg; } assert(PreciseRefCountsRequired()); // Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above. optAddCopies(); } //------------------------------------------------------------------------ // lvaComputeRefCounts: compute ref counts for locals // // Arguments: // isRecompute -- true if we just want ref counts and no other side effects; // false means to also look for true boolean locals, lay // groundwork for assertion prop, check type consistency, etc. // See lvaMarkLclRefs for details on what else goes on. // setSlotNumbers -- true if local slot numbers should be assigned. // // Notes: // Some implicit references are given actual counts or weight bumps here // to match pre-existing behavior. // // In fast-jitting modes where we don't ref count locals, this bypasses // actual counting, and makes all locals implicitly referenced on first // compute. It asserts all locals are implicitly referenced on recompute. // // When optimizing we also recompute lvaGenericsContextInUse based // on specially flagged LCL_VAR appearances. // void Compiler::lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers) { JITDUMP("\n*** lvaComputeRefCounts ***\n"); unsigned lclNum = 0; LclVarDsc* varDsc = nullptr; // Fast path for minopts and debug codegen. // // On first compute: mark all locals as implicitly referenced and untracked. // On recompute: do nothing. if (!PreciseRefCountsRequired()) { if (isRecompute) { #if defined(DEBUG) // All local vars should be marked as implicitly referenced // and not tracked. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (isSpecialVarargsParam) { assert(varDsc->lvRefCnt() == 0); } else { assert(varDsc->lvImplicitlyReferenced); } assert(!varDsc->lvTracked); } #endif // defined (DEBUG) return; } // First compute. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { // Using lvImplicitlyReferenced here ensures that we can't // accidentally make locals be unreferenced later by decrementing // the ref count to zero. // // If, in minopts/debug, we really want to allow locals to become // unreferenced later, we'll have to explicitly clear this bit. varDsc->setLvRefCnt(0); varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT); // Special case for some varargs params ... these must // remain unreferenced. const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (!isSpecialVarargsParam) { varDsc->lvImplicitlyReferenced = 1; } varDsc->lvTracked = 0; if (setSlotNumbers) { varDsc->lvSlotNum = lclNum; } // Assert that it's ok to bypass the type repair logic in lvaMarkLclRefs assert((varDsc->lvType != TYP_UNDEF) && (varDsc->lvType != TYP_VOID) && (varDsc->lvType != TYP_UNKNOWN)); } lvaCurEpoch++; lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; return; } // Slower path we take when optimizing, to get accurate counts. // // First, reset all explicit ref counts and weights. 
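// For illustration (hypothetical numbers): after the recount below, a
// local referenced once in a block of weight BB_UNITY_WEIGHT and twice in
// a block of weight 4 * BB_UNITY_WEIGHT would have lvRefCnt() == 3 and
// lvRefCntWtd() == 9 * BB_UNITY_WEIGHT, before any implicit bumps applied
// later in this function.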
for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { varDsc->setLvRefCnt(0); varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT); if (setSlotNumbers) { varDsc->lvSlotNum = lclNum; } // Set initial value for lvSingleDef for explicit and implicit // argument locals as they are "defined" on entry. // However, if we are just recomputing the ref counts, retain the value // that was set by past phases. if (!isRecompute) { varDsc->lvSingleDef = varDsc->lvIsParam; varDsc->lvSingleDefRegCandidate = varDsc->lvIsParam; } } // Remember current state of generic context use, and prepare // to compute new state. const bool oldLvaGenericsContextInUse = lvaGenericsContextInUse; lvaGenericsContextInUse = false; JITDUMP("\n*** lvaComputeRefCounts -- explicit counts ***\n"); // Second, account for all explicit local variable references for (BasicBlock* const block : Blocks()) { if (block->IsLIR()) { assert(isRecompute); const weight_t weight = block->getBBWeight(this); for (GenTree* node : LIR::AsRange(block)) { switch (node->OperGet()) { case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: { LclVarDsc* varDsc = lvaGetDesc(node->AsLclVarCommon()); // If this is an EH var, use a zero weight for defs, so that we don't // count those in our heuristic for register allocation, since they always // must be stored, so there's no value in enregistering them at defs; only // if there are enough uses to justify it. if (varDsc->lvLiveInOutOfHndlr && !varDsc->lvDoNotEnregister && ((node->gtFlags & GTF_VAR_DEF) != 0)) { varDsc->incRefCnts(0, this); } else { varDsc->incRefCnts(weight, this); } if ((node->gtFlags & GTF_VAR_CONTEXT) != 0) { assert(node->OperIs(GT_LCL_VAR)); lvaGenericsContextInUse = true; } break; } default: break; } } } else { lvaMarkLocalVars(block, isRecompute); } } if (oldLvaGenericsContextInUse && !lvaGenericsContextInUse) { // Context was in use but no longer is. This can happen // if we're able to optimize, so just leave a note. JITDUMP("\n** Generics context no longer in use\n"); } else if (lvaGenericsContextInUse && !oldLvaGenericsContextInUse) { // Context was not in use but now is. // // Changing from unused->used should never happen; creation of any new IR // for context use should also be settting lvaGenericsContextInUse. assert(!"unexpected new use of generics context"); } JITDUMP("\n*** lvaComputeRefCounts -- implicit counts ***\n"); // Third, bump ref counts for some implicit prolog references for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { // Todo: review justification for these count bumps. if (varDsc->lvIsRegArg) { if ((lclNum < info.compArgsCount) && (varDsc->lvRefCnt() > 0)) { // Fix 388376 ARM JitStress WP7 varDsc->incRefCnts(BB_UNITY_WEIGHT, this); varDsc->incRefCnts(BB_UNITY_WEIGHT, this); } // Ref count bump that was in lvaPromoteStructVar // // This was formerly done during RCS_EARLY counting, // and we did not used to reset counts like we do now. if (varDsc->lvIsStructField) { varDsc->incRefCnts(BB_UNITY_WEIGHT, this); } } // If we have JMP, all arguments must have a location // even if we don't use them inside the method if (compJmpOpUsed && varDsc->lvIsParam && (varDsc->lvRefCnt() == 0)) { // except when we have varargs and the argument is // passed on the stack. In that case, it's important // for the ref count to be zero, so that we don't attempt // to track them for GC info (which is not possible since we // don't know their offset in the stack). 
See the assert at the end of raMarkStkVars and bug #28949 for more info.
            if (!raIsVarargsStackArg(lclNum))
            {
                varDsc->lvImplicitlyReferenced = 1;
            }
        }
    }
}

void Compiler::lvaAllocOutgoingArgSpaceVar()
{
#if FEATURE_FIXED_OUT_ARGS

    // Set up the outgoing argument region, in case we end up using it later

    if (lvaOutgoingArgSpaceVar == BAD_VAR_NUM)
    {
        lvaOutgoingArgSpaceVar = lvaGrabTemp(false DEBUGARG("OutgoingArgSpace"));

        lvaTable[lvaOutgoingArgSpaceVar].lvType                 = TYP_LCLBLK;
        lvaTable[lvaOutgoingArgSpaceVar].lvImplicitlyReferenced = 1;
    }

    noway_assert(lvaOutgoingArgSpaceVar >= info.compLocalsCount && lvaOutgoingArgSpaceVar < lvaCount);

#endif // FEATURE_FIXED_OUT_ARGS
}

inline void Compiler::lvaIncrementFrameSize(unsigned size)
{
    if (size > MAX_FrameSize || compLclFrameSize + size > MAX_FrameSize)
    {
        BADCODE("Frame size overflow");
    }

    compLclFrameSize += size;
}

/****************************************************************************
*
*  Return true if absolute offsets of temps are larger than those of vars, or
*  in other words, whether we allocate temps before or after vars.  The /GS
*  buffer overrun checks want temps to be at lower stack addresses than buffers.
*/

bool Compiler::lvaTempsHaveLargerOffsetThanVars()
{
#ifdef TARGET_ARM
    // We never want to place the temps with larger offsets for ARM
    return false;
#else
    if (compGSReorderStackLayout)
    {
        return codeGen->isFramePointerUsed();
    }
    else
    {
        return true;
    }
#endif
}

/****************************************************************************
*
*  Return an upper bound estimate for the size of the compiler spill temps
*
*/

unsigned Compiler::lvaGetMaxSpillTempSize()
{
    unsigned result = 0;

    if (codeGen->regSet.hasComputedTmpSize())
    {
        result = codeGen->regSet.tmpGetTotalSize();
    }
    else
    {
        result = MAX_SPILL_TEMP_SIZE;
    }

    return result;
}

// clang-format off
/*****************************************************************************
 *
 *  Compute stack frame offsets for arguments, locals and optionally temps.
* * The frame is laid out as follows for x86: * * ESP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * |Callee saved registers | * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| <---- Ambient ESP * | Arguments for the | * ~ next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * EBP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * | incoming EBP | * |-----------------------| <---- EBP * |Callee saved registers | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | Last-executed-filter | * |-----------------------| * | | * ~ Shadow SPs ~ * | | * |-----------------------| * | | * ~ Variables ~ * | | * ~-----------------------| * | Temps | * |-----------------------| * | localloc | * |-----------------------| <---- Ambient ESP * | Arguments for the | * | next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * The frame is laid out as follows for x64: * * RSP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | <---- this padding (0 or 8 bytes) is to ensure flt registers are saved at a mem location aligned at 16-bytes * | | so that we can save 128-bit callee saved xmm regs using performant "movaps" instruction instead of "movups" * ------------------------- * | Callee saved Flt regs | <----- entire 128-bits of callee saved xmm registers are stored here * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP * | | | * ~ | Stack grows ~ * | | downward | * V * * * RBP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | * ------------------------- * | Callee saved Flt regs | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | | * | | * ~ Variables ~ * | | * | | * |-----------------------| * | Temps | * |-----------------------| * | | * ~ localloc ~ // not in frames with EH * | | * |-----------------------| * | PSPSym | // only in frames with EH (thus no localloc) * | | * |-----------------------| <---- RBP in localloc frames (max 240 bytes from Initial-SP) * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP (before localloc, this is Initial-SP) * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM (this is a general picture; details may differ for different conditions): * * SP frames * | | * 
|-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP / R11 frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which means FP-based frames * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | localloc | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM64 (this is a general picture; details may differ for different conditions): * NOTE: SP must be 16-byte aligned, so there may be alignment slots in the frame. * We will often save and establish a frame pointer to create better ETW stack walks. 
* * SP frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames where FP/LR are stored at the top of the frame (frames requiring GS that have localloc) * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * Doing this all in one pass is 'hard'. So instead we do it in 2 basic passes: * 1. Assign all the offsets relative to the Virtual '0'. 
Offsets above (the * incoming arguments) are positive. Offsets below (everything else) are * negative. This pass also calcuates the total frame size (between Caller's * SP/return address and the Ambient SP). * 2. Figure out where to place the frame pointer, and then adjust the offsets * as needed for the final stack size and whether the offset is frame pointer * relative or stack pointer relative. * */ // clang-format on void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState) { noway_assert((lvaDoneFrameLayout < curState) || (curState == REGALLOC_FRAME_LAYOUT)); lvaDoneFrameLayout = curState; #ifdef DEBUG if (verbose) { printf("*************** In lvaAssignFrameOffsets"); if (curState == INITIAL_FRAME_LAYOUT) { printf("(INITIAL_FRAME_LAYOUT)"); } else if (curState == PRE_REGALLOC_FRAME_LAYOUT) { printf("(PRE_REGALLOC_FRAME_LAYOUT)"); } else if (curState == REGALLOC_FRAME_LAYOUT) { printf("(REGALLOC_FRAME_LAYOUT)"); } else if (curState == TENTATIVE_FRAME_LAYOUT) { printf("(TENTATIVE_FRAME_LAYOUT)"); } else if (curState == FINAL_FRAME_LAYOUT) { printf("(FINAL_FRAME_LAYOUT)"); } else { printf("(UNKNOWN)"); unreached(); } printf("\n"); } #endif #if FEATURE_FIXED_OUT_ARGS assert(lvaOutgoingArgSpaceVar != BAD_VAR_NUM); #endif // FEATURE_FIXED_OUT_ARGS /*------------------------------------------------------------------------- * * First process the arguments. * *------------------------------------------------------------------------- */ lvaAssignVirtualFrameOffsetsToArgs(); /*------------------------------------------------------------------------- * * Now compute stack offsets for any variables that don't live in registers * *------------------------------------------------------------------------- */ lvaAssignVirtualFrameOffsetsToLocals(); lvaAlignFrame(); /*------------------------------------------------------------------------- * * Now patch the offsets * *------------------------------------------------------------------------- */ lvaFixVirtualFrameOffsets(); // Modify the stack offset for fields of promoted structs. lvaAssignFrameOffsetsToPromotedStructs(); /*------------------------------------------------------------------------- * * Finalize * *------------------------------------------------------------------------- */ // If it's not the final frame layout, then it's just an estimate. This means // we're allowed to once again write to these variables, even if we've read // from them to make tentative code generation or frame layout decisions. if (curState < FINAL_FRAME_LAYOUT) { codeGen->resetFramePointerUsedWritePhase(); } } /***************************************************************************** * lvaFixVirtualFrameOffsets() : Now that everything has a virtual offset, * determine the final value for the frame pointer (if needed) and then * adjust all the offsets appropriately. * * This routine fixes virtual offset to be relative to frame pointer or SP * based on whether varDsc->lvFramePointerBased is true or false respectively. */ void Compiler::lvaFixVirtualFrameOffsets() { LclVarDsc* varDsc; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space. // Without this code, lvaAlignFrame might have put the padding lower than the PSPSym, which would be between // the PSPSym and the outgoing argument space. varDsc = lvaGetDesc(lvaPSPSym); assert(varDsc->lvFramePointerBased); // We always access it RBP-relative. 
assert(!varDsc->lvMustInit); // It is never "must init". varDsc->SetStackOffset(codeGen->genCallerSPtoInitialSPdelta() + lvaLclSize(lvaOutgoingArgSpaceVar)); if (opts.IsOSR()) { // With OSR RBP points at the base of the OSR frame, but the virtual offsets // are from the base of the Tier0 frame. Adjust. // varDsc->SetStackOffset(varDsc->GetStackOffset() - info.compPatchpointInfo->TotalFrameSize()); } } #endif // The delta to be added to virtual offset to adjust it relative to frame pointer or SP int delta = 0; #ifdef TARGET_XARCH delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64 JITDUMP("--- delta bump %d for RA\n", REGSIZE_BYTES); if (codeGen->doubleAlignOrFramePointerUsed()) { JITDUMP("--- delta bump %d for FP\n", REGSIZE_BYTES); delta += REGSIZE_BYTES; // pushed EBP (frame pointer) } #endif if (!codeGen->isFramePointerUsed()) { // pushed registers, return address, and padding JITDUMP("--- delta bump %d for RSP frame\n", codeGen->genTotalFrameSize()); delta += codeGen->genTotalFrameSize(); } #if defined(TARGET_ARM) else { // We set FP to be after LR, FP delta += 2 * REGSIZE_BYTES; } #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) else { // FP is used. JITDUMP("--- delta bump %d for FP frame\n", codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta()); delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta(); } #endif // TARGET_AMD64 if (opts.IsOSR()) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Stack offset includes Tier0 frame. // JITDUMP("--- delta bump %d for OSR + Tier0 frame\n", info.compPatchpointInfo->TotalFrameSize()); delta += info.compPatchpointInfo->TotalFrameSize(); #endif } JITDUMP("--- virtual stack offset to actual stack offset delta is %d\n", delta); unsigned lclNum; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { bool doAssignStkOffs = true; // Can't be relative to EBP unless we have an EBP noway_assert(!varDsc->lvFramePointerBased || codeGen->doubleAlignOrFramePointerUsed()); // Is this a non-param promoted struct field? // if so then set doAssignStkOffs to false. // if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); #if defined(TARGET_X86) // On x86, we set the stack offset for a promoted field // to match a struct parameter in lvAssignFrameOffsetsToPromotedStructs. 
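// For illustration (hypothetical locals): if V05 is a dependently promoted
// field of struct V02, V05's offset is derived from V02's final location in
// lvaAssignFrameOffsetsToPromotedStructs(), so the delta is not applied to
// V05 here.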
if ((!varDsc->lvIsParam || parentvarDsc->lvIsParam) && promotionType == PROMOTION_TYPE_DEPENDENT) #else if (!varDsc->lvIsParam && promotionType == PROMOTION_TYPE_DEPENDENT) #endif { doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs() } } if (!varDsc->lvOnFrame) { if (!varDsc->lvIsParam #if !defined(TARGET_AMD64) || (varDsc->lvIsRegArg #if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) && compIsProfilerHookNeeded() && !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need assign stack offsets // for prespilled arguments #endif ) #endif // !defined(TARGET_AMD64) ) { doAssignStkOffs = false; // Not on frame or an incomming stack arg } } if (doAssignStkOffs) { JITDUMP("-- V%02u was %d, now %d\n", lclNum, varDsc->GetStackOffset(), varDsc->GetStackOffset() + delta); varDsc->SetStackOffset(varDsc->GetStackOffset() + delta); #if DOUBLE_ALIGN if (genDoubleAlign() && !codeGen->isFramePointerUsed()) { if (varDsc->lvFramePointerBased) { varDsc->SetStackOffset(varDsc->GetStackOffset() - delta); // We need to re-adjust the offsets of the parameters so they are EBP // relative rather than stack/frame pointer relative varDsc->SetStackOffset(varDsc->GetStackOffset() + (2 * TARGET_POINTER_SIZE)); // return address and pushed EBP noway_assert(varDsc->GetStackOffset() >= FIRST_ARG_STACK_OFFS); } } #endif // On System V environments the stkOffs could be 0 for params passed in registers. // // For normal methods only EBP relative references can have negative offsets. assert(codeGen->isFramePointerUsed() || varDsc->GetStackOffset() >= 0); } } assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { temp->tdAdjustTempOffs(delta); } lvaCachedGenericContextArgOffs += delta; #if FEATURE_FIXED_OUT_ARGS if (lvaOutgoingArgSpaceVar != BAD_VAR_NUM) { varDsc = lvaGetDesc(lvaOutgoingArgSpaceVar); varDsc->SetStackOffset(0); varDsc->lvFramePointerBased = false; varDsc->lvMustInit = false; } #endif // FEATURE_FIXED_OUT_ARGS #ifdef TARGET_ARM64 // We normally add alignment below the locals between them and the outgoing // arg space area. When we store fp/lr at the bottom, however, this will be // below the alignment. So we should not apply the alignment adjustment to // them. On ARM64 it turns out we always store these at +0 and +8 of the FP, // so instead of dealing with skipping adjustment just for them we just set // them here always. assert(codeGen->isFramePointerUsed()); if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(REGSIZE_BYTES); } #endif } #ifdef TARGET_ARM bool Compiler::lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask) { const LclVarDsc& desc = lvaTable[lclNum]; return desc.lvIsRegArg && (preSpillMask & genRegMask(desc.GetArgReg())); } #endif // TARGET_ARM //------------------------------------------------------------------------ // lvaUpdateArgWithInitialReg: Set the initial register of a local variable // to the one assigned by the register allocator. // // Arguments: // varDsc - the local variable descriptor // void Compiler::lvaUpdateArgWithInitialReg(LclVarDsc* varDsc) { noway_assert(varDsc->lvIsParam); if (varDsc->lvIsRegCandidate()) { varDsc->SetRegNum(varDsc->GetArgInitReg()); } } //------------------------------------------------------------------------ // lvaUpdateArgsWithInitialReg() : For each argument variable descriptor, update // its current register with the initial register as assigned by LSRA. 
// void Compiler::lvaUpdateArgsWithInitialReg() { if (!compLSRADone) { return; } for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromotedStruct()) { for (unsigned fieldVarNum = varDsc->lvFieldLclStart; fieldVarNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldVarNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldVarNum); lvaUpdateArgWithInitialReg(fieldVarDsc); } } else { lvaUpdateArgWithInitialReg(varDsc); } } } /***************************************************************************** * lvaAssignVirtualFrameOffsetsToArgs() : Assign virtual stack offsets to the * arguments, and implicit arguments (this ptr, return buffer, generics, * and varargs). */ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() { unsigned lclNum = 0; int argOffs = 0; #ifdef UNIX_AMD64_ABI int callerArgOffset = 0; #endif // UNIX_AMD64_ABI /* Assign stack offsets to arguments (in reverse order of passing). This means that if we pass arguments left->right, we start at the end of the list and work backwards, for right->left we start with the first argument and move forward. This is all relative to our Virtual '0' */ if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs = compArgSize; } /* Update the argOffs to reflect arguments that are passed in registers */ noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG); noway_assert(compMacOsArm64Abi() || compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; } // Update the arg initial register locations. lvaUpdateArgsWithInitialReg(); /* Is there a "this" argument? */ if (!info.compIsStatic) { noway_assert(lclNum == info.compThisArg); #ifndef TARGET_X86 argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); #endif // TARGET_X86 lclNum++; } unsigned userArgsToSkip = 0; #if !defined(TARGET_ARM) // In the native instance method calling convention on Windows, // the this parameter comes before the hidden return buffer parameter. // So, we want to process the native "this" parameter before we process // the native return buffer parameter. 
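// A hypothetical example: for a native instance method such as
//    LargeStruct Foo::Bar(int x)   // native order: (this, retBuf, x)
// the user parameter handled just below is laid out before the hidden
// return buffer, matching the native ordering described above.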
if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { #ifdef TARGET_X86 if (!lvaTable[lclNum].lvIsRegArg) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } #elif !defined(UNIX_AMD64_ABI) argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; userArgsToSkip++; } #endif /* if we have a hidden buffer parameter, that comes here */ if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(lclNum == info.compRetBuffArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); lclNum++; } #if USER_ARGS_COME_LAST //@GENERICS: extra argument for instantiation info if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { noway_assert(lclNum == (unsigned)info.compTypeCtxtArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } if (info.compIsVarArgs) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } #endif // USER_ARGS_COME_LAST CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; unsigned argSigLen = info.compMethodInfo->args.numArgs; // Skip any user args that we've already processed. assert(userArgsToSkip <= argSigLen); argSigLen -= userArgsToSkip; for (unsigned i = 0; i < userArgsToSkip; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } #ifdef TARGET_ARM // // struct_n { int; int; ... n times }; // // Consider signature: // // Foo (float a,double b,float c,double d,float e,double f,float g,double h, // float i,double j,float k,double l,struct_3 m) { } // // Basically the signature is: (all float regs full, 1 double, struct_3); // // The double argument occurs before pre spill in the argument iteration and // computes an argOffset of 0. struct_3 offset becomes 8. This is wrong. // Because struct_3 is prespilled and double occurs after prespill. // The correct offsets are double = 16 (aligned stk), struct_3 = 0..12, // Offset 12 will be skipped for double alignment of double. // // Another example is (struct_2, all float regs full, double, struct_2); // Here, notice the order is similarly messed up because of 2 pre-spilled // struct_2. // // Succinctly, // ARG_INDEX(i) > ARG_INDEX(j) DOES NOT IMPLY |ARG_OFFSET(i)| > |ARG_OFFSET(j)| // // Therefore, we'll do a two pass offset calculation, one that considers pre-spill // and the next, stack args. // unsigned argLcls = 0; // Take care of pre spill registers first. regMaskTP preSpillMask = codeGen->regSet.rsMaskPreSpillRegs(false); regMaskTP tempMask = RBM_NONE; for (unsigned i = 0, preSpillLclNum = lclNum; i < argSigLen; ++i, ++preSpillLclNum) { if (lvaIsPreSpilled(preSpillLclNum, preSpillMask)) { unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); argOffs = lvaAssignVirtualFrameOffsetToArg(preSpillLclNum, argSize, argOffs); argLcls++; // Early out if we can. If size is 8 and base reg is 2, then the mask is 0x1100 tempMask |= ((((1 << (roundUp(argSize, TARGET_POINTER_SIZE) / REGSIZE_BYTES))) - 1) << lvaTable[preSpillLclNum].GetArgReg()); if (tempMask == preSpillMask) { // We won't encounter more pre-spilled registers, // so don't bother iterating further. break; } } argLst = info.compCompHnd->getArgNext(argLst); } // Take care of non pre-spilled stack arguments. 
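// For illustration (a hypothetical signature): for (struct_8 a, int b, int c, int d),
// 'a' is pre-spilled in r0-r1 and 'b'/'c' arrive in r2/r3, so 'd' comes on the
// stack; the first pass above laid out the pre-spilled struct, and this second
// pass now assigns 'd' its offset without disturbing that layout.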
argLst = info.compMethodInfo->args.args; for (unsigned i = 0, stkLclNum = lclNum; i < argSigLen; ++i, ++stkLclNum) { if (!lvaIsPreSpilled(stkLclNum, preSpillMask)) { const unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); argOffs = lvaAssignVirtualFrameOffsetToArg(stkLclNum, argSize, argOffs); argLcls++; } argLst = info.compCompHnd->getArgNext(argLst); } lclNum += argLcls; #else // !TARGET_ARM for (unsigned i = 0; i < argSigLen; i++) { unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args); assert(compMacOsArm64Abi() || argumentSize % TARGET_POINTER_SIZE == 0); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); argLst = info.compCompHnd->getArgNext(argLst); } #endif // !TARGET_ARM #if !USER_ARGS_COME_LAST //@GENERICS: extra argument for instantiation info if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { noway_assert(lclNum == (unsigned)info.compTypeCtxtArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } if (info.compIsVarArgs) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } #endif // USER_ARGS_COME_LAST } #ifdef UNIX_AMD64_ABI // // lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an // individual argument, and return the offset for the next argument. // Note: This method only calculates the initial offset of the stack passed/spilled arguments // (if any - the RA might decide to spill(home on the stack) register passed arguments, if rarely used.) // The final offset is calculated in lvaFixVirtualFrameOffsets method. It accounts for FP existance, // ret address slot, stack frame padding, alloca instructions, etc. // Note: This is the implementation for UNIX_AMD64 System V platforms. // int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); noway_assert(argSize); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= argSize; } unsigned fieldVarNum = BAD_VAR_NUM; LclVarDsc* varDsc = lvaGetDesc(lclNum); noway_assert(varDsc->lvIsParam); if (varDsc->lvIsRegArg) { // Argument is passed in a register, don't count it // when updating the current offset on the stack. if (varDsc->lvOnFrame) { // The offset for args needs to be set only for the stack homed arguments for System V. varDsc->SetStackOffset(argOffs); } else { varDsc->SetStackOffset(0); } } else { // For Windows AMD64 there are 4 slots for the register passed arguments on the top of the caller's stack. // This is where they are always homed. So, they can be accessed with positive offset. // On System V platforms, if the RA decides to home a register passed arg on the stack, it creates a stack // location on the callee stack (like any other local var.) In such a case, the register passed, stack homed // arguments are accessed using negative offsets and the stack passed arguments are accessed using positive // offset (from the caller's stack.) // For System V platforms if there is no frame pointer the caller stack parameter offset should include the // callee allocated space. If frame register is used, the callee allocated space should not be included for // accessing the caller stack parameters. 
The last two requirements are met in lvaFixVirtualFrameOffsets // method, which fixes the offsets, based on frame pointer existence, existence of alloca instructions, ret // address pushed, ets. varDsc->SetStackOffset(*callerArgOffset); // Structs passed on stack could be of size less than TARGET_POINTER_SIZE. // Make sure they get at least TARGET_POINTER_SIZE on the stack - this is required for alignment. if (argSize > TARGET_POINTER_SIZE) { *callerArgOffset += (int)roundUp(argSize, TARGET_POINTER_SIZE); } else { *callerArgOffset += TARGET_POINTER_SIZE; } } // For struct promoted parameters we need to set the offsets for the field lclVars. // // For a promoted struct we also assign the struct fields stack offset if (varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; int offset = varDsc->GetStackOffset(); for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); fieldVarDsc->SetStackOffset(offset + fieldVarDsc->lvFldOffset); } } if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg) { argOffs += argSize; } return argOffs; } #else // !UNIX_AMD64_ABI // // lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an // individual argument, and return the offset for the next argument. // Note: This method only calculates the initial offset of the stack passed/spilled arguments // (if any - the RA might decide to spill(home on the stack) register passed arguments, if rarely used.) // The final offset is calculated in lvaFixVirtualFrameOffsets method. It accounts for FP existance, // ret address slot, stack frame padding, alloca instructions, etc. // Note: This implementation for all the platforms but UNIX_AMD64 OSs (System V 64 bit.) int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); noway_assert(argSize); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= argSize; } unsigned fieldVarNum = BAD_VAR_NUM; LclVarDsc* varDsc = lvaGetDesc(lclNum); noway_assert(varDsc->lvIsParam); if (varDsc->lvIsRegArg) { /* Argument is passed in a register, don't count it * when updating the current offset on the stack */ CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARMARCH) #if DEBUG // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize // Also investigate why we are incrementing argOffs for X86 as this seems incorrect // noway_assert(argSize == TARGET_POINTER_SIZE); #endif // DEBUG #endif #if defined(TARGET_X86) argOffs += TARGET_POINTER_SIZE; #elif defined(TARGET_AMD64) // Register arguments on AMD64 also takes stack space. (in the backing store) varDsc->SetStackOffset(argOffs); argOffs += TARGET_POINTER_SIZE; #elif defined(TARGET_ARM64) // Register arguments on ARM64 only take stack space when they have a frame home. // Unless on windows and in a vararg method. if (compFeatureArgSplit() && this->info.compIsVarArgs) { if (varDsc->lvType == TYP_STRUCT && varDsc->GetOtherArgReg() >= MAX_REG_ARG && varDsc->GetOtherArgReg() != REG_NA) { // This is a split struct. It will account for an extra (8 bytes) // of alignment. 
varDsc->SetStackOffset(varDsc->GetStackOffset() + TARGET_POINTER_SIZE); argOffs += TARGET_POINTER_SIZE; } } #elif defined(TARGET_ARM) // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, so we have to do SetStackOffset() here // regMaskTP regMask = genRegMask(varDsc->GetArgReg()); if (codeGen->regSet.rsMaskPreSpillRegArg & regMask) { // Signature: void foo(struct_8, int, struct_4) // ------- CALLER SP ------- // r3 struct_4 // r2 int - not prespilled, but added for alignment. argOffs should skip this. // r1 struct_8 // r0 struct_8 // ------------------------- // If we added alignment we need to fix argOffs for all registers above alignment. if (codeGen->regSet.rsMaskPreSpillAlign != RBM_NONE) { assert(genCountBits(codeGen->regSet.rsMaskPreSpillAlign) == 1); // Is register beyond the alignment pos? if (regMask > codeGen->regSet.rsMaskPreSpillAlign) { // Increment argOffs just once for the _first_ register after alignment pos // in the prespill mask. if (!BitsBetween(codeGen->regSet.rsMaskPreSpillRegArg, regMask, codeGen->regSet.rsMaskPreSpillAlign)) { argOffs += TARGET_POINTER_SIZE; } } } switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) { break; } FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: { // // Let's assign offsets to arg1, a double in r2. argOffs has to be 4 not 8. // // ------- CALLER SP ------- // r3 // r2 double -- argOffs = 4, but it doesn't need to be skipped, because there is no skipping. // r1 VACookie -- argOffs = 0 // ------------------------- // // Consider argOffs as if it accounts for number of prespilled registers before the current // register. In the above example, for r2, it is r1 that is prespilled, but since r1 is // accounted for by argOffs being 4, there should have been no skipping. Instead, if we didn't // assign r1 to any variable, then argOffs would still be 0 which implies it is not accounting // for r1, equivalently r1 is skipped. // // If prevRegsSize is unaccounted for by a corresponding argOffs, we must have skipped a register. int prevRegsSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegArg & (regMask - 1)) * TARGET_POINTER_SIZE; if (argOffs < prevRegsSize) { // We must align up the argOffset to a multiple of 8 to account for skipped registers. argOffs = roundUp((unsigned)argOffs, 2 * TARGET_POINTER_SIZE); } // We should've skipped only a single register. assert(argOffs == prevRegsSize); } break; default: // No alignment of argOffs required break; } varDsc->SetStackOffset(argOffs); argOffs += argSize; } #else // TARGET* #error Unsupported or unset target architecture #endif // TARGET* } else { #if defined(TARGET_ARM) // Dev11 Bug 42817: incorrect codegen for DrawFlatCheckBox causes A/V in WinForms // // Here we have method with a signature (int a1, struct a2, struct a3, int a4, int a5). // Struct parameter 'a2' is 16-bytes with no alignment requirements; // it uses r1,r2,r3 and [OutArg+0] when passed. // Struct parameter 'a3' is 16-bytes that is required to be double aligned; // the caller skips [OutArg+4] and starts the argument at [OutArg+8]. // Thus the caller generates the correct code to pass the arguments. // When generating code to receive the arguments we set codeGen->regSet.rsMaskPreSpillRegArg to [r1,r2,r3] // and spill these three registers as the first instruction in the prolog. // Then when we layout the arguments' stack offsets we have an argOffs 0 which // points at the location that we spilled r1 into the stack. 
For this first // struct we take the lvIsRegArg path above with "codeGen->regSet.rsMaskPreSpillRegArg &" matching. // Next when we calculate the argOffs for the second 16-byte struct we have an argOffs // of 16, which appears to be aligned properly so we don't skip a stack slot. // // To fix this we must recover the actual OutArg offset by subtracting off the // size of the PreSpill register args. // Then we align this offset to a multiple of 8 and add back the size // of the PreSpill register args. // // Dev11 Bug 71767: failure of assert(sizeofPreSpillRegArgs <= argOffs) // // We have a method with 'this' passed in r0, RetBuf arg in r1, VarArgs cookie // in r2. The first user arg is a 144 byte struct with double alignment required, // r3 is skipped, and the struct is passed on the stack. However, 'r3' is added // to the codeGen->regSet.rsMaskPreSpillRegArg mask by the VarArgs cookie code, since we need to // home all the potential varargs arguments in registers, even if we don't have // signature type information for the variadic arguments. However, due to alignment, // we have skipped a register that doesn't have a corresponding symbol. Make up // for that by increasing argOffs here. // int sizeofPreSpillRegArgs = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; if (argOffs < sizeofPreSpillRegArgs) { // This can only happen if we skipped the last register spot because current stk arg // is a struct requiring alignment or a pre-spill alignment was required because the // first reg arg needed alignment. // // Example 1: First Stk Argument requiring alignment in vararg case (same as above comment.) // Signature (int a0, int a1, int a2, struct {long} a3, ...) // // stk arg a3 --> argOffs here will be 12 (r0-r2) but pre-spill will be 16. // ---- Caller SP ---- // r3 --> Stack slot is skipped in this case. // r2 int a2 // r1 int a1 // r0 int a0 // // Example 2: First Reg Argument requiring alignment in no-vararg case. // Signature (struct {long} a0, struct {int} a1, int a2, int a3) // // stk arg --> argOffs here will be 12 {r0-r2} but pre-spill will be 16. // ---- Caller SP ---- // r3 int a2 --> pushed (not pre-spilled) for alignment of a0 by lvaInitUserArgs. // r2 struct { int } a1 // r0-r1 struct { long } a0 CLANG_FORMAT_COMMENT_ANCHOR; #ifdef PROFILING_SUPPORTED // On ARM under the profiler, r0-r3 are always prespilled on the stack. // It is possible to have methods that accept only HFAs as parameters e.g. Signature(struct hfa1, struct // hfa2), in which case hfa1 and hfa2 will be enregistered in co-processor registers and will have an // argument offset less than size of preSpill. // // For this reason the following conditions are asserted when not under the profiler. if (!compIsProfilerHookNeeded()) #endif { bool cond = ((info.compIsVarArgs || opts.compUseSoftFP) && // Does cur stk arg require double alignment? ((varDsc->lvType == TYP_STRUCT && varDsc->lvStructDoubleAlign) || (varDsc->lvType == TYP_DOUBLE) || (varDsc->lvType == TYP_LONG))) || // Did first reg arg require alignment?
(codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST)); noway_assert(cond); noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of alignment } argOffs = sizeofPreSpillRegArgs; } noway_assert(argOffs >= sizeofPreSpillRegArgs); int argOffsWithoutPreSpillRegArgs = argOffs - sizeofPreSpillRegArgs; switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) break; FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: // We must align up the argOffset to a multiple of 8 argOffs = roundUp((unsigned)argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs; break; default: // No alignment of argOffs required break; } #endif // TARGET_ARM const bool isFloatHfa = (varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT)); const unsigned argAlignment = eeGetArgAlignment(varDsc->lvType, isFloatHfa); if (compMacOsArm64Abi()) { argOffs = roundUp(argOffs, argAlignment); } assert((argSize % argAlignment) == 0); assert((argOffs % argAlignment) == 0); varDsc->SetStackOffset(argOffs); } // For struct promoted parameters we need to set the offsets for both LclVars. // // For a dependent promoted struct we also assign the struct fields stack offset CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_64BIT) if ((varDsc->TypeGet() == TYP_LONG) && varDsc->lvPromoted) { noway_assert(varDsc->lvFieldCnt == 2); fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset()); lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) if (varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); JITDUMP("Adjusting offset of dependent V%02u of arg V%02u: parent %u field %u net %u\n", lclNum, firstFieldNum + i, varDsc->GetStackOffset(), fieldVarDsc->lvFldOffset, varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); } } if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg) { argOffs += argSize; } return argOffs; } #endif // !UNIX_AMD64_ABI //----------------------------------------------------------------------------- // lvaAssignVirtualFrameOffsetsToLocals: compute the virtual stack offsets for // all elements on the stack frame. // // Notes: // Can be called multiple times. Early calls can be used to estimate various // frame offsets, but details may change. // void Compiler::lvaAssignVirtualFrameOffsetsToLocals() { // (1) Account for things that are set up by the prolog and undone by the epilog. // int stkOffs = 0; int originalFrameStkOffs = 0; int originalFrameSize = 0; // codeGen->isFramePointerUsed is set in the regalloc phase. Initialize it to a guess for pre-regalloc layout. if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT) { codeGen->setFramePointerUsed(codeGen->isFramePointerRequired()); } #ifdef TARGET_ARM64 // Decide where to save FP and LR registers. We store FP/LR registers at the bottom of the frame if there is // a frame pointer used (so we get positive offsets from the frame pointer to access locals), but not if we // need a GS cookie AND localloc is used, since we need the GS cookie to protect the saved return value, // and also the saved frame pointer. See CodeGen::genPushCalleeSavedRegisters() for more details about the // frame types.
Since saving FP/LR at high addresses is a relatively rare case, force using it during stress. // (It should be legal to use these frame types for every frame). if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 0) { // Default configuration codeGen->SetSaveFpLrWithAllCalleeSavedRegisters((getNeedsGSSecurityCookie() && compLocallocUsed) || compStressCompile(STRESS_GENERIC_VARN, 20)); } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 1) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(false); // Disable using new frames } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 2) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(true); // Force using new frames } #endif // TARGET_ARM64 #ifdef TARGET_XARCH // On x86/amd64, the return address has already been pushed by the call instruction in the caller. stkOffs -= TARGET_POINTER_SIZE; // return address; if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs); } #endif // If we are an OSR method, we "inherit" the frame of the original method // if (opts.IsOSR()) { originalFrameSize = info.compPatchpointInfo->TotalFrameSize(); originalFrameStkOffs = stkOffs; stkOffs -= originalFrameSize; } #ifdef TARGET_XARCH // TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other // calleeregs. When you fix this, you'll also need to fix // the assert at the bottom of this method if (codeGen->doubleAlignOrFramePointerUsed()) { stkOffs -= REGSIZE_BYTES; } #endif int preSpillSize = 0; bool mustDoubleAlign = false; #ifdef TARGET_ARM mustDoubleAlign = true; preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; #else // !TARGET_ARM #if DOUBLE_ALIGN if (genDoubleAlign()) { mustDoubleAlign = true; // X86 only } #endif #endif // !TARGET_ARM #ifdef TARGET_ARM64 // If the frame pointer is used, then we'll save FP/LR at the bottom of the stack. // Otherwise, we won't store FP, and we'll store LR at the top, with the other callee-save // registers (if any). int initialStkOffs = 0; if (info.compIsVarArgs) { // For varargs we always save all of the integer register arguments // so that they are contiguous with the incoming stack arguments. initialStkOffs = MAX_REG_ARG * REGSIZE_BYTES; stkOffs -= initialStkOffs; } if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || !isFramePointerUsed()) // Note that currently we always have a frame pointer { stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; } else { // Subtract off FP and LR. assert(compCalleeRegsPushed >= 2); stkOffs -= (compCalleeRegsPushed - 2) * REGSIZE_BYTES; } #else // !TARGET_ARM64 #ifdef TARGET_ARM // On ARM32 LR is part of the pushed registers and is always stored at the // top. if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs - REGSIZE_BYTES); } #endif stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; #endif // !TARGET_ARM64 // (2) Account for the remainder of the frame // // From this point on the code must generally adjust both // stkOffs and the local frame size. The latter is done via: // // lvaIncrementFrameSize -- for space not associated with a local var // lvaAllocLocalAndSetVirtualOffset -- for space associated with a local var // // One exception to the above: OSR locals that have offsets within the Tier0 // portion of the frame. 
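// As a concrete (hypothetical) x64 recap of step (1) above, assuming a non-OSR method that uses a frame // pointer and pushes two other callee-saved registers: // stkOffs = 0 ---> -8 (return address) ---> -16 (saved RBP) ---> -32 (2 * REGSIZE_BYTES callee saves) // so the first 8-byte local laid out in step (2) would receive a virtual offset of -40.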
// compLclFrameSize = 0; #ifdef TARGET_AMD64 // For methods with patchpoints, the Tier0 method must reserve // space for all the callee saves, as this area is shared with the // OSR method, and we have to anticipate that collectively the // Tier0 and OSR methods end up saving all callee saves. // // Currently this is x64 only. // if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints()) { const unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); const unsigned extraSlots = genCountBits(RBM_OSR_INT_CALLEE_SAVED) - regsPushed; const unsigned extraSlotSize = extraSlots * REGSIZE_BYTES; JITDUMP("\nMethod has patchpoints and has %u callee saves.\n" "Reserving %u extra slots (%u bytes) for potential OSR method callee saves\n", regsPushed, extraSlots, extraSlotSize); stkOffs -= extraSlotSize; lvaIncrementFrameSize(extraSlotSize); } // In case of Amd64 compCalleeRegsPushed does not include float regs (Xmm6-xmm15) that // need to be pushed. But Amd64 doesn't support push/pop of xmm registers. // Instead we need to allocate space for them on the stack and save them in prolog. // Therefore, we consider xmm registers being saved while computing stack offsets // but space for xmm registers is considered part of compLclFrameSize. // Notes // 1) We need to save the entire 128-bits of xmm register to stack, since amd64 // prolog unwind codes allow encoding of an instruction that stores the entire xmm reg // at an offset relative to SP // 2) We adjust frame size so that SP is aligned at 16-bytes after pushing integer registers. // This means while saving the first xmm register to its allocated stack location we might // have to skip 8-bytes. The reason for padding is to use efficient "movaps" to save/restore // xmm registers to/from stack to match Jit64 codegen. Without the aligning on 16-byte // boundary we would have to use movups when offset turns out unaligned. Movaps is more // performant than movups. const unsigned calleeFPRegsSavedSize = genCountBits(compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES; // For OSR the alignment pad computation should not take the original frame into account. // Original frame size includes the pseudo-saved RA and so is always = 8 mod 16. const int offsetForAlign = -(stkOffs + originalFrameSize); if ((calleeFPRegsSavedSize > 0) && ((offsetForAlign % XMM_REGSIZE_BYTES) != 0)) { // Take care of alignment int alignPad = (int)AlignmentPad((unsigned)offsetForAlign, XMM_REGSIZE_BYTES); assert(alignPad != 0); stkOffs -= alignPad; lvaIncrementFrameSize(alignPad); } stkOffs -= calleeFPRegsSavedSize; lvaIncrementFrameSize(calleeFPRegsSavedSize); // Quirk for VS debug-launch scenario to work if (compVSQuirkStackPaddingNeeded > 0) { #ifdef DEBUG if (verbose) { printf("\nAdding VS quirk stack padding of %d bytes between save-reg area and locals\n", compVSQuirkStackPaddingNeeded); } #endif // DEBUG stkOffs -= compVSQuirkStackPaddingNeeded; lvaIncrementFrameSize(compVSQuirkStackPaddingNeeded); } #endif // TARGET_AMD64 #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARMARCH) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including // padding (so we can avoid computing the same padding in the funclet // frame). Note that there is no special padding requirement for the PSPSym. 
noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(TARGET_ARMARCH) if (mustDoubleAlign) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // Allocate a pointer sized stack slot, since we may need to double align here // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs // then we need to allocate a second pointer sized stack slot, // since we may need to double align that LclVar when we see it // in the loop below. We will just always do this so that the // offsets that we calculate for the stack frame will always // be greater (or equal) to what they can be in the final layout. // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } else // FINAL_FRAME_LAYOUT { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } } if (lvaMonAcquired != BAD_VAR_NUM) { // For OSR we use the flag set up by the original method. // if (opts.IsOSR()) { assert(info.compPatchpointInfo->HasMonitorAcquired()); int originalOffset = info.compPatchpointInfo->MonitorAcquiredOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP( "---OSR--- V%02u (on tier0 frame, monitor acquired) tier0 FP-rel offset %d tier0 frame offset %d new " "virt offset %d\n", lvaMonAcquired, originalOffset, originalFrameStkOffs, offset); lvaTable[lvaMonAcquired].SetStackOffset(offset); } else { // This var must go first, in what is called the 'frame header' for EnC so that it is // preserved when remapping occurs. See vm\eetwain.cpp for detailed comment specifying frame // layout requirements for EnC to work. stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaMonAcquired, lvaLclSize(lvaMonAcquired), stkOffs); } } #ifdef JIT32_GCENCODER if (lvaLocAllocSPvar != BAD_VAR_NUM) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaLocAllocSPvar, TARGET_POINTER_SIZE, stkOffs); } #endif // JIT32_GCENCODER // For OSR methods, param type args are always reportable via the root method frame slot // (see gcInfoBlockHdrSave) and so do not need a new slot on the frame. // // OSR methods may also be able to use the root frame's kept-alive 'this', if the root // method needed to report this. // // Inlining done under OSR may introduce new reporting, in which case the OSR frame // must allocate a slot.
if (lvaReportParamTypeArg()) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; assert(ppInfo->HasGenericContextArgOffset()); const int originalOffset = ppInfo->GenericContextArgOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; } else { // For CORINFO_CALLCONV_PARAMTYPE (if needed) lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #ifndef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis()) { bool canUseExistingSlot = false; if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; if (ppInfo->HasKeptAliveThis()) { const int originalOffset = ppInfo->KeptAliveThisOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; canUseExistingSlot = true; } } if (!canUseExistingSlot) { // When "this" is also used as generic context arg. lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #endif #if !defined(FEATURE_EH_FUNCLETS) /* If we need space for slots for shadow SP, reserve it now */ if (ehNeedsShadowSPslots()) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect if (!lvaReportParamTypeArg()) { #ifndef JIT32_GCENCODER if (!lvaKeepAliveAndReportThis()) #endif { // In order to keep the gc info encoding smaller, the VM assumes that all methods with EH // have also saved space for a ParamTypeArg, so we need to do that here lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclSize(lvaShadowSPslotsVar), stkOffs); } #endif // !FEATURE_EH_FUNCLETS if (compGSReorderStackLayout) { assert(getNeedsGSSecurityCookie()); if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } /* If we're supposed to track lifetimes of pointer temps, we'll assign frame offsets in the following order: non-pointer local variables (also untracked pointer variables) pointer local variables pointer temps non-pointer temps */ enum Allocation { ALLOC_NON_PTRS = 0x1, // assign offsets to non-ptr ALLOC_PTRS = 0x2, // Second pass, assign offsets to tracked ptrs ALLOC_UNSAFE_BUFFERS = 0x4, ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8 }; UINT alloc_order[5]; unsigned int cur = 0; if (compGSReorderStackLayout) { noway_assert(getNeedsGSSecurityCookie()); if (codeGen->isFramePointerUsed()) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; } } bool tempsAllocated = false; if (lvaTempsHaveLargerOffsetThanVars() && !codeGen->isFramePointerUsed()) { // Because we want the temps to have a larger offset than locals // and we're not using a frame pointer, we have to place the temps // above the vars. Otherwise we place them after the vars (at the // bottom of the frame). 
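// Illustrative sketch of the layout for this case (higher addresses toward the top; not to scale): // ---- callee-saved registers ---- // ---- spill temps (allocated here, first) ---- // ---- local variables ---- // ---- outgoing arg space (FEATURE_FIXED_OUT_ARGS only) ----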
noway_assert(!tempsAllocated); stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign); tempsAllocated = true; } alloc_order[cur++] = ALLOC_NON_PTRS; if (opts.compDbgEnC) { /* We will use just one pass, and assign offsets to all variables */ alloc_order[cur - 1] |= ALLOC_PTRS; noway_assert(compGSReorderStackLayout == false); } else { alloc_order[cur++] = ALLOC_PTRS; } if (!codeGen->isFramePointerUsed() && compGSReorderStackLayout) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; } alloc_order[cur] = 0; noway_assert(cur < ArrLen(alloc_order)); // Force first pass to happen UINT assignMore = 0xFFFFFFFF; bool have_LclVarDoubleAlign = false; for (cur = 0; alloc_order[cur]; cur++) { if ((assignMore & alloc_order[cur]) == 0) { continue; } assignMore = 0; unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { /* Ignore field locals of the promotion type PROMOTION_TYPE_FIELD_DEPENDENT. In other words, we will not calculate the "base" address of the struct local if the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT. */ if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { continue; } #if FEATURE_FIXED_OUT_ARGS // The scratch mem is used for the outgoing arguments, and it must be absolutely last if (lclNum == lvaOutgoingArgSpaceVar) { continue; } #endif bool allocateOnFrame = varDsc->lvOnFrame; if (varDsc->lvRegister && (lvaDoneFrameLayout == REGALLOC_FRAME_LAYOUT) && ((varDsc->TypeGet() != TYP_LONG) || (varDsc->GetOtherReg() != REG_STK))) { allocateOnFrame = false; } // For OSR args and locals, we use the slots on the original frame. // // Note we must do this even for "non frame" locals, as we sometimes // will refer to their memory homes. if (lvaIsOSRLocal(lclNum)) { if (varDsc->lvIsStructField) { const unsigned parentLclNum = varDsc->lvParentLcl; const int parentOriginalOffset = info.compPatchpointInfo->Offset(parentLclNum); const int offset = originalFrameStkOffs + parentOriginalOffset + varDsc->lvFldOffset; JITDUMP("---OSR--- V%02u (promoted field of V%02u; on tier0 frame) tier0 FP-rel offset %d tier0 " "frame offset %d field offset %d new virt offset " "%d\n", lclNum, parentLclNum, parentOriginalOffset, originalFrameStkOffs, varDsc->lvFldOffset, offset); lvaTable[lclNum].SetStackOffset(offset); } else { // Add the frame-pointer-relative offset of this OSR live local in the original frame // to the offset of the original frame in our new frame. const int originalOffset = info.compPatchpointInfo->Offset(lclNum); const int offset = originalFrameStkOffs + originalOffset; JITDUMP( "---OSR--- V%02u (on tier0 frame) tier0 FP-rel offset %d tier0 frame offset %d new virt offset " "%d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } /* Ignore variables that are not on the stack frame */ if (!allocateOnFrame) { /* For EnC, all variables have to be allocated space on the stack, even though they may actually be enregistered. This way, the frame layout can be directly inferred from the locals-sig. */ if (!opts.compDbgEnC) { continue; } else if (lclNum >= info.compLocalsCount) { // ignore temps for EnC continue; } } else if (lvaGSSecurityCookie == lclNum && getNeedsGSSecurityCookie()) { // Special case for OSR. If the original method had a cookie, // we use its slot on the original frame.
if (opts.IsOSR() && info.compPatchpointInfo->HasSecurityCookie()) { int originalOffset = info.compPatchpointInfo->SecurityCookieOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP("---OSR--- V%02u (on tier0 frame, security cookie) tier0 FP-rel offset %d tier0 frame " "offset %d new " "virt offset %d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset if ( #if defined(FEATURE_EH_FUNCLETS) lclNum == lvaPSPSym || #else lclNum == lvaShadowSPslotsVar || #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER lclNum == lvaRetAddrVar) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } if (lclNum == lvaMonAcquired) { continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaStubArgumentVar) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaInlinedPInvokeFrameVar) { noway_assert(codeGen->isFramePointerUsed()); continue; } if (varDsc->lvIsParam) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On Windows AMD64 we can use the caller-reserved stack area that is already setup assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; #else // !TARGET_AMD64 // A register argument that is not enregistered ends up as // a local variable which will need stack frame space. // if (!varDsc->lvIsRegArg) { continue; } #ifdef TARGET_ARM64 if (info.compIsVarArgs && varDsc->GetArgReg() != theFixedRetBuffArgNum()) { // Stack offset to varargs (parameters) should point to home area which will be preallocated. const unsigned regArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg()); varDsc->SetStackOffset(-initialStkOffs + regArgNum * REGSIZE_BYTES); continue; } #endif #ifdef TARGET_ARM // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, thus they don't need stack frame space. // if ((codeGen->regSet.rsMaskPreSpillRegs(false) & genRegMask(varDsc->GetArgReg())) != 0) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } #endif #endif // !TARGET_AMD64 } /* Make sure the type is appropriate */ if (varDsc->lvIsUnsafeBuffer && compGSReorderStackLayout) { if (varDsc->lvIsPtr) { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS_WITH_PTRS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS_WITH_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS; continue; } } } else if (varTypeIsGC(varDsc->TypeGet()) && varDsc->lvTracked) { if ((alloc_order[cur] & ALLOC_PTRS) == 0) { assignMore |= ALLOC_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_NON_PTRS) == 0) { assignMore |= ALLOC_NON_PTRS; continue; } } /* Need to align the offset? 
*/ if (mustDoubleAlign && (varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86 #ifdef TARGET_ARM || varDsc->lvType == TYP_LONG // Align longs for ARM #endif #ifndef TARGET_64BIT || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true #endif // !TARGET_64BIT )) { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) && !have_LclVarDoubleAlign) { // If this is the first TYP_LONG, TYP_DOUBLE or double aligned struct // that we have seen in this loop, then we allocate a pointer sized // stack slot since we may need to double align this LclVar // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } else { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } // Remember that we had to double align a LclVar have_LclVarDoubleAlign = true; } // Reserve the stack space for this variable stkOffs = lvaAllocLocalAndSetVirtualOffset(lclNum, lvaLclSize(lclNum), stkOffs); #ifdef TARGET_ARMARCH // If we have an incoming register argument that has a struct promoted field // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // if (varDsc->lvIsRegArg && varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); } } #ifdef TARGET_ARM // If we have an incoming register argument that has a promoted long // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // else if (varDsc->lvIsRegArg && varDsc->lvPromoted) { assert(varTypeIsLong(varDsc) && (varDsc->lvFieldCnt == 2)); unsigned fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset()); lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + 4); } #endif // TARGET_ARM #endif // TARGET_ARMARCH } } if (getNeedsGSSecurityCookie() && !compGSReorderStackLayout) { if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { // LOCALLOC used, but we have no unsafe buffer. Allocate the cookie last, close to the localloc buffer. stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } if (tempsAllocated == false) { /*------------------------------------------------------------------------- * * Now the temps * *------------------------------------------------------------------------- */ stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign); } /*------------------------------------------------------------------------- * * Now do some final stuff * *------------------------------------------------------------------------- */ // lvaInlinedPInvokeFrameVar and lvaStubArgumentVar need to be assigned last // Important: The stack walker depends on lvaStubArgumentVar immediately // following lvaInlinedPInvokeFrameVar in the frame.
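// Sketch of the intended ordering (stack grows downwards; lvaStubArgumentVar is allocated first // below, so it sits at the higher address): // ---- other locals / temps ---- // ---- lvaStubArgumentVar ---- // ---- lvaInlinedPInvokeFrameVar ---- // ---- outgoing arg space ----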
if (lvaStubArgumentVar != BAD_VAR_NUM) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaStubArgumentVar, lvaLclSize(lvaStubArgumentVar), stkOffs); } if (lvaInlinedPInvokeFrameVar != BAD_VAR_NUM) { noway_assert(codeGen->isFramePointerUsed()); stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaInlinedPInvokeFrameVar, lvaLclSize(lvaInlinedPInvokeFrameVar), stkOffs); } if (mustDoubleAlign) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // Allocate a pointer sized stack slot, since we may need to double align here // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; if (have_LclVarDoubleAlign) { // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs // then we need to allocate a second pointer sized stack slot, // since we may need to double align the last LclVar that we saw // in the loop above. We do this so that the offsets that we // calculate for the stack frame are always greater than they will // be in the final layout. // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } else // FINAL_FRAME_LAYOUT { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument // space. Any padding will be higher on the stack than this // (including the padding added by lvaAlignFrame()). noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) #ifdef TARGET_ARM64 if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have a frame pointer { // Create space for saving FP and LR. stkOffs -= 2 * REGSIZE_BYTES; } #endif // TARGET_ARM64 #if FEATURE_FIXED_OUT_ARGS if (lvaOutgoingArgSpaceSize > 0) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V. noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE)); #endif noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0); // Give it a value so we can avoid asserts in CHK builds. // Since this will always use an SP relative offset of zero // at the end of lvaFixVirtualFrameOffsets, it will be set to absolute '0' stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaOutgoingArgSpaceVar, lvaLclSize(lvaOutgoingArgSpaceVar), stkOffs); } #endif // FEATURE_FIXED_OUT_ARGS // compLclFrameSize equals our negated virtual stack offset minus the pushed registers and return address // and the pushed frame pointer register which for some strange reason isn't part of 'compCalleeRegsPushed'.
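// Worked (hypothetical) x64 example of the invariant asserted below: with a frame pointer, two other // callee-saved registers pushed, and stkOffs == -0x58, pushedCount is 2 + 1 (RBP) + 1 (return address) = 4, // so -(stkOffs + 4 * TARGET_POINTER_SIZE) == 0x58 - 0x20 == 0x38, which must equal compLclFrameSize // (plus the inherited Tier0 frame size for OSR methods; zero here).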
int pushedCount = compCalleeRegsPushed; #ifdef TARGET_ARM64 if (info.compIsVarArgs) { pushedCount += MAX_REG_ARG; } #endif #ifdef TARGET_XARCH if (codeGen->doubleAlignOrFramePointerUsed()) { pushedCount += 1; // pushed EBP (frame pointer) } pushedCount += 1; // pushed PC (return address) #endif noway_assert(compLclFrameSize + originalFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE))); } int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs) { noway_assert(lclNum != BAD_VAR_NUM); #ifdef TARGET_64BIT // Before final frame layout, assume the worst case, that every >=8 byte local will need // maximum padding to be aligned. This is because we generate code based on the stack offset // computed during tentative frame layout. These offsets cannot get bigger during final // frame layout, as that would possibly require different code generation (for example, // using a 4-byte offset instead of a 1-byte offset in an instruction). The offsets can get // smaller. It is possible there is different alignment at the point locals are allocated // between tentative and final frame layout which would introduce padding between locals // and thus increase the offset (from the stack pointer) of one of the locals. Hence the // need to assume the worst alignment before final frame layout. // We could probably improve this by sorting all the objects by alignment, // such that all 8 byte objects are together, 4 byte objects are together, etc., which // would require at most one alignment padding per group. // // TYP_SIMD structs locals have alignment preference given by getSIMDTypeAlignment() for // better performance. if ((size >= 8) && ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || ((stkOffs % 8) != 0) #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES || lclVarIsSIMDType(lclNum) #endif )) { // Note that stack offsets are negative or equal to zero assert(stkOffs <= 0); // alignment padding unsigned pad = 0; #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(lclNum) && !lvaIsImplicitByRefLocal(lclNum)) { int alignment = getSIMDTypeAlignment(lvaTable[lclNum].lvType); if (stkOffs % alignment != 0) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = alignment - 1; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = alignment + (stkOffs % alignment); // +1 to +(alignment-1) bytes } } } else #endif // FEATURE_SIMD && ALIGN_SIMD_TYPES { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = 7; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = 8 + (stkOffs % 8); // +1 to +7 bytes } } // Will the pad ever be anything except 4? Do we put smaller-than-4-sized objects on the stack? lvaIncrementFrameSize(pad); stkOffs -= pad; #ifdef DEBUG if (verbose) { printf("Pad "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x, pad=%d\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? -stkOffs : stkOffs, pad); } #endif } #endif // TARGET_64BIT /* Reserve space on the stack by bumping the frame size */ lvaIncrementFrameSize(size); stkOffs -= size; lvaTable[lclNum].SetStackOffset(stkOffs); #ifdef DEBUG if (verbose) { printf("Assign "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? 
-stkOffs : stkOffs); } #endif return stkOffs; } #ifdef TARGET_AMD64 /***************************************************************************** * lvaIsCalleeSavedIntRegCountEven() : returns true if the number of integer registers * pushed onto stack is even including RBP if used as frame pointer * * Note that this excludes return address (PC) pushed by caller. To know whether * the SP offset after pushing integer registers is aligned, we need to take the * negation of this routine's result. */ bool Compiler::lvaIsCalleeSavedIntRegCountEven() { unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); return (regsPushed % (16 / REGSIZE_BYTES)) == 0; } #endif // TARGET_AMD64 /***************************************************************************** * lvaAlignFrame() : After allocating everything on the frame, reserve any * extra space needed to keep the frame aligned */ void Compiler::lvaAlignFrame() { #if defined(TARGET_AMD64) // Leaf frames do not need full alignment, but the unwind info is smaller if we // are at least 8 byte aligned (and we assert as much) if ((compLclFrameSize % 8) != 0) { lvaIncrementFrameSize(8 - (compLclFrameSize % 8)); } else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add 8 so compLclFrameSize is still a multiple of 8. lvaIncrementFrameSize(8); } assert((compLclFrameSize % 8) == 0); // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD // if needed, but off by 8 because of the return address. // And don't forget that compCalleeRegsPushed does *not* include RBP if we are // using it as the frame pointer. // bool regPushedCountAligned = lvaIsCalleeSavedIntRegCountEven(); bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0; // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI // The compNeedToAlignFrame flag indicates whether the frame needs to be aligned. // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for // FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called. // On AMD64-Unix, there are no such slots. It is possible to have calls in a method with a frame size of 0, // in which case the frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by // remembering that there are calls and making sure the frame alignment logic is executed. bool stackNeedsAlignment = (compLclFrameSize != 0 || opts.compNeedToAlignFrame); #else // !UNIX_AMD64_ABI bool stackNeedsAlignment = compLclFrameSize != 0; #endif // !UNIX_AMD64_ABI if ((!codeGen->isFramePointerUsed() && (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)) || (stackNeedsAlignment && (regPushedCountAligned == lclFrameSizeAligned))) { lvaIncrementFrameSize(REGSIZE_BYTES); } #elif defined(TARGET_ARM64) // The stack on ARM64 must be 16 byte aligned. // First, align up to 8. if ((compLclFrameSize % 8) != 0) { lvaIncrementFrameSize(8 - (compLclFrameSize % 8)); } else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add 8 so compLclFrameSize is still a multiple of 8.
lvaIncrementFrameSize(8); } assert((compLclFrameSize % 8) == 0); // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD // if needed. bool regPushedCountAligned = (compCalleeRegsPushed % (16 / REGSIZE_BYTES)) == 0; bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0; // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || (regPushedCountAligned != lclFrameSizeAligned)) { lvaIncrementFrameSize(REGSIZE_BYTES); } #elif defined(TARGET_ARM) // Ensure that stack offsets will be double-aligned by grabbing an unused DWORD if needed. // bool lclFrameSizeAligned = (compLclFrameSize % sizeof(double)) == 0; bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) % (sizeof(double) / TARGET_POINTER_SIZE)) == 0; if (regPushedCountAligned != lclFrameSizeAligned) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); } #elif defined(TARGET_X86) #if DOUBLE_ALIGN if (genDoubleAlign()) { // Double Frame Alignment for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals() if (compLclFrameSize == 0) { // This can only happen with JitStress=1 or JitDoubleAlign=2 lvaIncrementFrameSize(TARGET_POINTER_SIZE); } } #endif if (STACK_ALIGN > REGSIZE_BYTES) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add the maximum pad that we could ever have (which is 12) lvaIncrementFrameSize(STACK_ALIGN - REGSIZE_BYTES); } // Align the stack with STACK_ALIGN value. int adjustFrameSize = compLclFrameSize; #if defined(UNIX_X86_ABI) bool isEbpPushed = codeGen->isFramePointerUsed(); #if DOUBLE_ALIGN isEbpPushed |= genDoubleAlign(); #endif // we need to consider spilled register(s) plus return address and/or EBP int adjustCount = compCalleeRegsPushed + 1 + (isEbpPushed ? 1 : 0); adjustFrameSize += (adjustCount * REGSIZE_BYTES) % STACK_ALIGN; #endif if ((adjustFrameSize % STACK_ALIGN) != 0) { lvaIncrementFrameSize(STACK_ALIGN - (adjustFrameSize % STACK_ALIGN)); } } #else NYI("TARGET specific lvaAlignFrame"); #endif // !TARGET_AMD64 } /***************************************************************************** * lvaAssignFrameOffsetsToPromotedStructs() : Assign offsets to fields * within a promoted struct (worker for lvaAssignFrameOffsets). */ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() { LclVarDsc* varDsc = lvaTable; for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++, varDsc++) { // For promoted struct fields that are params, we will // assign their offsets in lvaAssignVirtualFrameOffsetToArg(). // This is not true for the System V systems since there is no // outgoing args space. Assign the dependently promoted fields properly. // CLANG_FORMAT_COMMENT_ANCHOR; #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) // ARM: lo/hi parts of a promoted long arg need to be updated. // // For System V platforms there is no outgoing args space. // // For System V and x86, a register passed struct arg is homed on the stack in a separate local var. // The offset of these structs is already calculated in the lvaAssignVirtualFrameOffsetToArg method. // Make sure the code below is not executed for these structs and the offset is not changed. // const bool mustProcessParams = true; #else // OSR must also assign offsets here.
// const bool mustProcessParams = opts.IsOSR(); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) if (varDsc->lvIsStructField && (!varDsc->lvIsParam || mustProcessParams)) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); if (promotionType == PROMOTION_TYPE_INDEPENDENT) { // The stack offset for these field locals must have been calculated // by the normal frame offset assignment. continue; } else { noway_assert(promotionType == PROMOTION_TYPE_DEPENDENT); noway_assert(varDsc->lvOnFrame); if (parentvarDsc->lvOnFrame) { JITDUMP("Adjusting offset of dependent V%02u of V%02u: parent %u field %u net %u\n", lclNum, varDsc->lvParentLcl, parentvarDsc->GetStackOffset(), varDsc->lvFldOffset, parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); varDsc->SetStackOffset(parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); } else { varDsc->lvOnFrame = false; noway_assert(varDsc->lvRefCnt() == 0); } } } } } /***************************************************************************** * lvaAllocateTemps() : Assign virtual offsets to temps (always negative). */ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) { unsigned spillTempSize = 0; if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT) { int preSpillSize = 0; #ifdef TARGET_ARM preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * TARGET_POINTER_SIZE; #endif /* Allocate temps */ assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { var_types tempType = temp->tdTempType(); unsigned size = temp->tdTempSize(); /* Figure out and record the stack offset of the temp */ /* Need to align the offset? */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0)) { // Calculate 'pad' as the number of bytes to align up 'stkOffs' to be a multiple of TARGET_POINTER_SIZE // In practice this is really just a fancy way of writing 4. (as all stack locations are at least 4-byte // aligned). Note stkOffs is always negative, so (stkOffs % TARGET_POINTER_SIZE) yields a negative // value. // int alignPad = (int)AlignmentPad((unsigned)-stkOffs, TARGET_POINTER_SIZE); spillTempSize += alignPad; lvaIncrementFrameSize(alignPad); stkOffs -= alignPad; noway_assert((stkOffs % TARGET_POINTER_SIZE) == 0); } #endif if (mustDoubleAlign && (tempType == TYP_DOUBLE)) // Align doubles for x86 and ARM { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { spillTempSize += TARGET_POINTER_SIZE; lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } spillTempSize += size; lvaIncrementFrameSize(size); stkOffs -= size; temp->tdSetTempOffs(stkOffs); } #ifdef TARGET_ARM // It is only on the ARM platform that we require an accurate estimate for spillTempSize noway_assert(spillTempSize <= lvaGetMaxSpillTempSize()); #endif } else // We haven't run codegen, so there are no Spill temps yet! { unsigned size = lvaGetMaxSpillTempSize(); lvaIncrementFrameSize(size); stkOffs -= size; } return stkOffs; } #ifdef DEBUG /***************************************************************************** * * Dump the register a local is in right now.
It is only the current location, since the location changes and it * is updated throughout code generation based on LSRA register assignments. */ void Compiler::lvaDumpRegLocation(unsigned lclNum) { const LclVarDsc* varDsc = lvaGetDesc(lclNum); #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_DOUBLE) { // The assigned registers are `lvRegNum:RegNext(lvRegNum)` printf("%3s:%-3s ", getRegName(varDsc->GetRegNum()), getRegName(REG_NEXT(varDsc->GetRegNum()))); } else #endif // TARGET_ARM { printf("%3s ", getRegName(varDsc->GetRegNum())); } } /***************************************************************************** * * Dump the frame location assigned to a local. * It's the home location, even though the variable doesn't always live * in its home location. */ void Compiler::lvaDumpFrameLocation(unsigned lclNum) { int offset; regNumber baseReg; #ifdef TARGET_ARM offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0, /* isFloatUsage */ false); #else bool EBPbased; offset = lvaFrameAddress(lclNum, &EBPbased); baseReg = EBPbased ? REG_FPBASE : REG_SPBASE; #endif printf("[%2s%1s%02XH] ", getRegName(baseReg), (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } /***************************************************************************** * * dump a single lvaTable entry */ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth) { LclVarDsc* varDsc = lvaGetDesc(lclNum); var_types type = varDsc->TypeGet(); if (curState == INITIAL_FRAME_LAYOUT) { printf("; "); gtDispLclVar(lclNum); printf(" %7s ", varTypeName(type)); gtDispLclVarStructType(lclNum); } else { if (varDsc->lvRefCnt() == 0) { // Print this with a special indicator that the variable is unused. Even though the // variable itself is unused, it might be a struct that is promoted, so seeing it // can be useful when looking at the promoted struct fields. It's also weird to see // missing var numbers if these aren't printed. printf(";* "); } #if FEATURE_FIXED_OUT_ARGS // Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until // after we set it to something. else if ((lclNum == lvaOutgoingArgSpaceVar) && lvaOutgoingArgSpaceSize.HasFinalValue() && (lvaOutgoingArgSpaceSize == 0)) { // Similar to above; print this anyway. printf(";# "); } #endif // FEATURE_FIXED_OUT_ARGS else { printf("; "); } gtDispLclVar(lclNum); printf("[V%02u", lclNum); if (varDsc->lvTracked) { printf(",T%02u]", varDsc->lvVarIndex); } else { printf(" ]"); } printf(" (%3u,%*s)", varDsc->lvRefCnt(), (int)refCntWtdWidth, refCntWtd2str(varDsc->lvRefCntWtd())); printf(" %7s ", varTypeName(type)); if (genTypeSize(type) == 0) { printf("(%2d) ", lvaLclSize(lclNum)); } else { printf(" -> "); } // The register or stack location field is 11 characters wide. if ((varDsc->lvRefCnt() == 0) && !varDsc->lvImplicitlyReferenced) { printf("zero-ref "); } else if (varDsc->lvRegister != 0) { // It's always a register, and always in the same register. lvaDumpRegLocation(lclNum); } else if (varDsc->lvOnFrame == 0) { printf("registers "); } else { // For RyuJIT backend, it might be in a register part of the time, but it will definitely have a stack home // location. Otherwise, it's always on the stack. 
if (lvaDoneFrameLayout != NO_FRAME_LAYOUT) { lvaDumpFrameLocation(lclNum); } } } if (varDsc->lvIsHfa()) { printf(" HFA(%s) ", varTypeName(varDsc->GetHfaType())); } if (varDsc->lvDoNotEnregister) { printf(" do-not-enreg["); if (varDsc->IsAddressExposed()) { printf("X"); } if (varTypeIsStruct(varDsc)) { printf("S"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::VMNeedsStackAddr) { printf("V"); } if (lvaEnregEHVars && varDsc->lvLiveInOutOfHndlr) { printf("%c", varDsc->lvSingleDefDisqualifyReason); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::LocalField) { printf("F"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::BlockOp) { printf("B"); } if (varDsc->lvIsMultiRegArg) { printf("A"); } if (varDsc->lvIsMultiRegRet) { printf("R"); } #ifdef JIT32_GCENCODER if (varDsc->lvPinned) printf("P"); #endif // JIT32_GCENCODER printf("]"); } if (varDsc->lvIsMultiRegArg) { printf(" multireg-arg"); } if (varDsc->lvIsMultiRegRet) { printf(" multireg-ret"); } if (varDsc->lvMustInit) { printf(" must-init"); } if (varDsc->IsAddressExposed()) { printf(" addr-exposed"); } if (varDsc->lvHasLdAddrOp) { printf(" ld-addr-op"); } if (varDsc->lvVerTypeInfo.IsThisPtr()) { printf(" this"); } if (varDsc->lvPinned) { printf(" pinned"); } if (varDsc->lvStackByref) { printf(" stack-byref"); } if (varDsc->lvClassHnd != NO_CLASS_HANDLE) { printf(" class-hnd"); } if (varDsc->lvClassIsExact) { printf(" exact"); } if (varDsc->lvLiveInOutOfHndlr) { printf(" EH-live"); } if (varDsc->lvSpillAtSingleDef) { printf(" spill-single-def"); } else if (varDsc->lvSingleDefRegCandidate) { printf(" single-def"); } if (lvaIsOSRLocal(lclNum) && varDsc->lvOnFrame) { printf(" tier0-frame"); } #ifndef TARGET_64BIT if (varDsc->lvStructDoubleAlign) printf(" double-align"); #endif // !TARGET_64BIT if (varDsc->lvOverlappingFields) { printf(" overlapping-fields"); } if (compGSReorderStackLayout && !varDsc->lvRegister) { if (varDsc->lvIsPtr) { printf(" ptr"); } if (varDsc->lvIsUnsafeBuffer) { printf(" unsafe-buffer"); } } if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); #if !defined(TARGET_64BIT) if (varTypeIsLong(parentvarDsc)) { bool isLo = (lclNum == parentvarDsc->lvFieldLclStart); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, isLo ? "lo" : "hi", isLo ? 0 : genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) { CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->GetStructHnd(); CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, eeGetFieldName(fldHnd), varDsc->lvFldOffset); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); switch (promotionType) { case PROMOTION_TYPE_NONE: printf(" P-NONE"); break; case PROMOTION_TYPE_DEPENDENT: printf(" P-DEP"); break; case PROMOTION_TYPE_INDEPENDENT: printf(" P-INDEP"); break; } } } if (varDsc->lvReason != nullptr) { printf(" \"%s\"", varDsc->lvReason); } printf("\n"); } /***************************************************************************** * * dump the lvaTable */ void Compiler::lvaTableDump(FrameLayoutState curState) { if (curState == NO_FRAME_LAYOUT) { curState = lvaDoneFrameLayout; if (curState == NO_FRAME_LAYOUT) { // Still no layout? 
Could be a bug, but just display the initial layout curState = INITIAL_FRAME_LAYOUT; } } if (curState == INITIAL_FRAME_LAYOUT) { printf("; Initial"); } else if (curState == PRE_REGALLOC_FRAME_LAYOUT) { printf("; Pre-RegAlloc"); } else if (curState == REGALLOC_FRAME_LAYOUT) { printf("; RegAlloc"); } else if (curState == TENTATIVE_FRAME_LAYOUT) { printf("; Tentative"); } else if (curState == FINAL_FRAME_LAYOUT) { printf("; Final"); } else { printf("UNKNOWN FrameLayoutState!"); unreached(); } printf(" local variable assignments\n"); printf(";\n"); unsigned lclNum; LclVarDsc* varDsc; // Figure out some sizes, to help line things up size_t refCntWtdWidth = 6; // Use 6 as the minimum width if (curState != INITIAL_FRAME_LAYOUT) // don't need this info for INITIAL_FRAME_LAYOUT { for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { size_t width = strlen(refCntWtd2str(varDsc->lvRefCntWtd())); if (width > refCntWtdWidth) { refCntWtdWidth = width; } } } // Do the actual output for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { lvaDumpEntry(lclNum, curState, refCntWtdWidth); } //------------------------------------------------------------------------- // Display the code-gen temps assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { printf("; TEMP_%02u %26s%*s%7s -> ", -temp->tdTempNum(), " ", refCntWtdWidth, " ", varTypeName(temp->tdTempType())); int offset = temp->tdTempOffs(); printf(" [%2s%1s0x%02X]\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE, (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } if (curState >= TENTATIVE_FRAME_LAYOUT) { printf(";\n"); printf("; Lcl frame size = %d\n", compLclFrameSize); } } #endif // DEBUG /***************************************************************************** * * Conservatively estimate the layout of the stack frame. * * This function is only used before final frame layout. It conservatively estimates the * number of callee-saved registers that must be saved, then calls lvaAssignFrameOffsets(). * To do final frame layout, the callee-saved registers are known precisely, so * lvaAssignFrameOffsets() is called directly. * * Returns the (conservative, that is, overly large) estimated size of the frame, * including the callee-saved registers. This is only used by the emitter during code * generation when estimating the size of the offset of instructions accessing temps, * and only if temps have a larger offset than variables. */ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) { assert(curState < FINAL_FRAME_LAYOUT); unsigned result; /* Layout the stack frame conservatively. Assume all callee-saved registers are spilled to stack */ compCalleeRegsPushed = CNT_CALLEE_SAVED; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) compCalleeRegsPushed += CNT_CALLEE_SAVED_FLOAT; compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters #elif defined(TARGET_AMD64) if (compFloatingPointUsed) { compCalleeFPRegsSavedMask = RBM_FLT_CALLEE_SAVED; } else { compCalleeFPRegsSavedMask = RBM_NONE; } #endif #if DOUBLE_ALIGN if (genDoubleAlign()) { // X86 only - account for extra 4-byte pad that may be created by "and esp, -8" instruction compCalleeRegsPushed++; } #endif #ifdef TARGET_XARCH // Since FP/EBP is included in the SAVED_REG_MAXSZ we need to // subtract 1 register if codeGen->isFramePointerUsed() is true. 
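// For example, on an ISA where CNT_CALLEE_SAVED counts 8 registers including EBP/RBP, a method that // uses the frame pointer only pushes 7 of them as ordinary callee saves, so the conservative count is // decremented by one below; the frame pointer push itself is accounted for separately during layout. // (Illustrative only; the actual constant is target-specific.)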
    if (codeGen->isFramePointerUsed())
    {
        compCalleeRegsPushed--;
    }
#endif

    lvaAssignFrameOffsets(curState);

    unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ;
#if defined(TARGET_ARMARCH)
    if (compFloatingPointUsed)
    {
        calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ;
    }
    calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. See genPushCalleeSavedRegisters
#endif

    result = compLclFrameSize + calleeSavedRegMaxSz;

    return result;
}

//------------------------------------------------------------------------
// lvaGetSPRelativeOffset: Given a variable, return the offset of that
// variable in the frame from the stack pointer. This number will be positive,
// since the stack pointer must be at a lower address than everything on the
// stack.
//
// This can't be called for localloc functions, since the stack pointer
// varies, and thus there is no fixed offset to a variable from the stack pointer.
//
// Arguments:
//    varNum - the variable number
//
// Return Value:
//    The offset.
//
int Compiler::lvaGetSPRelativeOffset(unsigned varNum)
{
    assert(!compLocallocUsed);
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvOnFrame);
    int spRelativeOffset;

    if (varDsc->lvFramePointerBased)
    {
        // The stack offset is relative to the frame pointer, so convert it to be
        // relative to the stack pointer (which makes no sense for localloc functions).
        spRelativeOffset = varDsc->GetStackOffset() + codeGen->genSPtoFPdelta();
    }
    else
    {
        spRelativeOffset = varDsc->GetStackOffset();
    }

    assert(spRelativeOffset >= 0);
    return spRelativeOffset;
}

/*****************************************************************************
 *
 *  Return the caller-SP-relative stack offset of a local/parameter.
 *  Requires the local to be on the stack and frame layout to be complete.
 */

int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum)
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvOnFrame);

    return lvaToCallerSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased);
}

//-----------------------------------------------------------------------------
// lvaToCallerSPRelativeOffset: translate a frame offset into an offset from
//    the caller's stack pointer.
//
// Arguments:
//    offset - frame offset
//    isFpBased - if true, offset is from FP, otherwise offset is from SP
//    forRootFrame - if the current method is an OSR method, adjust the offset
//      to be relative to the SP for the root method, instead of being relative
//      to the SP for the OSR method.
//
// Returns:
//    suitable offset
//
int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRootFrame) const
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);

    if (isFpBased)
    {
        offset += codeGen->genCallerSPtoFPdelta();
    }
    else
    {
        offset += codeGen->genCallerSPtoInitialSPdelta();
    }

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    if (forRootFrame && opts.IsOSR())
    {
        const PatchpointInfo* const ppInfo = info.compPatchpointInfo;

#if defined(TARGET_AMD64)
        // The offset computed above already includes the OSR frame adjustment, plus the
        // pop of the "pseudo return address" from the OSR frame.
        //
        // To get to root method caller-SP, we need to subtract off the tier0 frame
        // size and the pushed return address and RBP for the tier0 frame (which we know is an
        // RBP frame).
        //
        // ppInfo's TotalFrameSize also accounts for the popped pseudo return address
        // between the tier0 method frame and the OSR frame.
        // So the net adjustment is simply TotalFrameSize plus one register.
        //
        const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES;

#elif defined(TARGET_ARM64)
        const int adjustment = ppInfo->TotalFrameSize();
#endif

        offset -= adjustment;
    }
#else
    // OSR NYI for other targets.
    assert(!opts.IsOSR());
#endif

    return offset;
}

/*****************************************************************************
 *
 *  Return the Initial-SP-relative stack offset of a local/parameter.
 *  Requires the local to be on the stack and frame layout to be complete.
 */

int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum)
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvOnFrame);

    return lvaToInitialSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased);
}

// Given a local variable offset, and whether that offset is frame-pointer based, return its offset from Initial-SP.
// This is used, for example, to figure out the offset of the frame pointer from Initial-SP.
int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased)
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
#ifdef TARGET_AMD64
    if (isFpBased)
    {
        // Currently, the frame starts by pushing ebp, ebp points to the saved ebp
        // (so we have ebp pointer chaining). Add the fixed-size frame size plus the
        // size of the callee-saved regs (not including ebp itself) to find Initial-SP.

        assert(codeGen->isFramePointerUsed());
        offset += codeGen->genSPtoFPdelta();
    }
    else
    {
        // The offset is correct already!
    }
#else  // !TARGET_AMD64
    NYI("lvaToInitialSPRelativeOffset");
#endif // !TARGET_AMD64

    return offset;
}

/*****************************************************************************/

#ifdef DEBUG

/*****************************************************************************
 *  Pick a padding size at "random" for the local.
 *  0 means that it should not be converted to a GT_LCL_FLD
 */

static unsigned LCL_FLD_PADDING(unsigned lclNum)
{
    // Convert every 2nd variable
    if (lclNum % 2)
    {
        return 0;
    }

    // Pick a padding size at "random"
    unsigned size = lclNum % 7;

    return size;
}

/*****************************************************************************
 *
 *  Callback for fgWalkAllTreesPre()
 *  Convert as many GT_LCL_VAR's to GT_LCL_FLD's
 */

/* static */
/*
    The stress mode does 2 passes.

    In the first pass we will mark the locals where we CAN'T apply the stress mode.
    In the second pass we will do the appropriate morphing wherever we've not
    determined we can't do it.
*/ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; genTreeOps oper = tree->OperGet(); GenTree* lcl; switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: lcl = tree; break; case GT_ADDR: if (tree->AsOp()->gtOp1->gtOper != GT_LCL_VAR) { return WALK_CONTINUE; } lcl = tree->AsOp()->gtOp1; break; default: return WALK_CONTINUE; } noway_assert(lcl->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR)); Compiler* const pComp = ((lvaStressLclFldArgs*)data->pCallbackData)->m_pCompiler; const bool bFirstPass = ((lvaStressLclFldArgs*)data->pCallbackData)->m_bFirstPass; const unsigned lclNum = lcl->AsLclVarCommon()->GetLclNum(); var_types type = lcl->TypeGet(); LclVarDsc* const varDsc = pComp->lvaGetDesc(lclNum); if (varDsc->lvNoLclFldStress) { // Already determined we can't do anything for this var return WALK_SKIP_SUBTREES; } if (bFirstPass) { // Ignore arguments and temps if (varDsc->lvIsParam || lclNum >= pComp->info.compLocalsCount) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Ignore OSR locals; if in memory, they will live on the // Tier0 frame and so can't have their storage adjusted. // if (pComp->lvaIsOSRLocal(lclNum)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Likewise for Tier0 methods with patchpoints -- // if we modify them we'll misreport their locations in the patchpoint info. // if (pComp->doesMethodHavePatchpoints() || pComp->doesMethodHavePartialCompilationPatchpoints()) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Fix for lcl_fld stress mode if (varDsc->lvKeepType) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Can't have GC ptrs in TYP_BLK. if (!varTypeIsArithmetic(type)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // The noway_assert in the second pass below, requires that these types match, or we have a TYP_BLK // if ((varDsc->lvType != lcl->gtType) && (varDsc->lvType != TYP_BLK)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Weed out "small" types like TYP_BYTE as we don't mark the GT_LCL_VAR // node with the accurate small type. If we bash lvaTable[].lvType, // then there will be no indication that it was ever a small type. 
var_types varType = varDsc->TypeGet(); if (varType != TYP_BLK && genTypeSize(varType) != genTypeSize(genActualType(varType))) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Offset some of the local variable by a "random" non-zero amount unsigned padding = LCL_FLD_PADDING(lclNum); if (padding == 0) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } } else { // Do the morphing noway_assert((varDsc->lvType == lcl->gtType) || (varDsc->lvType == TYP_BLK)); var_types varType = varDsc->TypeGet(); // Calculate padding unsigned padding = LCL_FLD_PADDING(lclNum); #ifdef TARGET_ARMARCH // We need to support alignment requirements to access memory on ARM ARCH unsigned alignment = 1; pComp->codeGen->InferOpSizeAlign(lcl, &alignment); alignment = roundUp(alignment, TARGET_POINTER_SIZE); padding = roundUp(padding, alignment); #endif // TARGET_ARMARCH // Change the variable to a TYP_BLK if (varType != TYP_BLK) { varDsc->lvExactSize = roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE); varDsc->lvType = TYP_BLK; pComp->lvaSetVarAddrExposed(lclNum DEBUGARG(AddressExposedReason::STRESS_LCL_FLD)); } tree->gtFlags |= GTF_GLOB_REF; /* Now morph the tree appropriately */ if (oper == GT_LCL_VAR) { /* Change lclVar(lclNum) to lclFld(lclNum,padding) */ tree->ChangeOper(GT_LCL_FLD); tree->AsLclFld()->SetLclOffs(padding); } else if (oper == GT_LCL_VAR_ADDR) { tree->ChangeOper(GT_LCL_FLD_ADDR); tree->AsLclFld()->SetLclOffs(padding); } else { /* Change addr(lclVar) to addr(lclVar)+padding */ noway_assert(oper == GT_ADDR); GenTree* paddingTree = pComp->gtNewIconNode(padding); GenTree* newAddr = pComp->gtNewOperNode(GT_ADD, tree->gtType, tree, paddingTree); *pTree = newAddr; lcl->gtType = TYP_BLK; } } return WALK_SKIP_SUBTREES; } /*****************************************************************************/ void Compiler::lvaStressLclFld() { if (!compStressCompile(STRESS_LCL_FLDS, 5)) { return; } lvaStressLclFldArgs Args; Args.m_pCompiler = this; Args.m_bFirstPass = true; // Do First pass fgWalkAllTreesPre(lvaStressLclFldCB, &Args); // Second pass Args.m_bFirstPass = false; fgWalkAllTreesPre(lvaStressLclFldCB, &Args); } #endif // DEBUG /***************************************************************************** * * A little routine that displays a local variable bitset. * 'set' is mask of variables that have to be displayed * 'allVars' is the complete set of interesting variables (blank space is * inserted if its corresponding bit is not in 'set'). */ #ifdef DEBUG void Compiler::lvaDispVarSet(VARSET_VALARG_TP set) { VARSET_TP allVars(VarSetOps::MakeEmpty(this)); lvaDispVarSet(set, allVars); } void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars) { printf("{"); bool needSpace = false; for (unsigned index = 0; index < lvaTrackedCount; index++) { if (VarSetOps::IsMember(this, set, index)) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching variable */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { if ((varDsc->lvVarIndex == index) && varDsc->lvTracked) { break; } } if (needSpace) { printf(" "); } else { needSpace = true; } printf("V%02u", lclNum); } else if (VarSetOps::IsMember(this, allVars, index)) { if (needSpace) { printf(" "); } else { needSpace = true; } printf(" "); } } printf("}"); } #endif // DEBUG
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" #include "register_arg_convention.h" #include "jitstd/algorithm.h" #include "patchpointinfo.h" /*****************************************************************************/ #ifdef DEBUG #if DOUBLE_ALIGN /* static */ unsigned Compiler::s_lvaDoubleAlignedProcsCount = 0; #endif #endif /*****************************************************************************/ void Compiler::lvaInit() { /* We haven't allocated stack variables yet */ lvaRefCountState = RCS_INVALID; lvaGenericsContextInUse = false; lvaTrackedToVarNumSize = 0; lvaTrackedToVarNum = nullptr; lvaTrackedFixed = false; // false: We can still add new tracked variables lvaDoneFrameLayout = NO_FRAME_LAYOUT; #if !defined(FEATURE_EH_FUNCLETS) lvaShadowSPslotsVar = BAD_VAR_NUM; #endif // !FEATURE_EH_FUNCLETS lvaInlinedPInvokeFrameVar = BAD_VAR_NUM; lvaReversePInvokeFrameVar = BAD_VAR_NUM; #if FEATURE_FIXED_OUT_ARGS lvaPInvokeFrameRegSaveVar = BAD_VAR_NUM; lvaOutgoingArgSpaceVar = BAD_VAR_NUM; lvaOutgoingArgSpaceSize = PhasedVar<unsigned>(); #endif // FEATURE_FIXED_OUT_ARGS #ifdef JIT32_GCENCODER lvaLocAllocSPvar = BAD_VAR_NUM; #endif // JIT32_GCENCODER lvaNewObjArrayArgs = BAD_VAR_NUM; lvaGSSecurityCookie = BAD_VAR_NUM; #ifdef TARGET_X86 lvaVarargsBaseOfStkArgs = BAD_VAR_NUM; #endif // TARGET_X86 lvaVarargsHandleArg = BAD_VAR_NUM; lvaStubArgumentVar = BAD_VAR_NUM; lvaArg0Var = BAD_VAR_NUM; lvaMonAcquired = BAD_VAR_NUM; lvaRetAddrVar = BAD_VAR_NUM; lvaInlineeReturnSpillTemp = BAD_VAR_NUM; gsShadowVarInfo = nullptr; #if defined(FEATURE_EH_FUNCLETS) lvaPSPSym = BAD_VAR_NUM; #endif #if FEATURE_SIMD lvaSIMDInitTempVarNum = BAD_VAR_NUM; #endif // FEATURE_SIMD lvaCurEpoch = 0; structPromotionHelper = new (this, CMK_Generic) StructPromotionHelper(this); } /*****************************************************************************/ void Compiler::lvaInitTypeRef() { /* x86 args look something like this: [this ptr] [hidden return buffer] [declared arguments]* [generic context] [var arg cookie] x64 is closer to the native ABI: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* (Note: prior to .NET Framework 4.5.1 for Windows 8.1 (but not .NET Framework 4.5.1 "downlevel"), the "hidden return buffer" came before the "this ptr". Now, the "this ptr" comes first. This is different from the C++ order, where the "hidden return buffer" always comes first.) 
ARM and ARM64 are the same as the current x64 convention: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* Key difference: The var arg cookie and generic context are swapped with respect to the user arguments */ /* Set compArgsCount and compLocalsCount */ info.compArgsCount = info.compMethodInfo->args.numArgs; // Is there a 'this' pointer if (!info.compIsStatic) { info.compArgsCount++; } else { info.compThisArg = BAD_VAR_NUM; } info.compILargsCount = info.compArgsCount; #ifdef FEATURE_SIMD if (supportSIMDTypes() && (info.compRetNativeType == TYP_STRUCT)) { var_types structType = impNormStructType(info.compMethodInfo->args.retTypeClass); info.compRetType = structType; } #endif // FEATURE_SIMD // Are we returning a struct using a return buffer argument? // const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // Possibly change the compRetNativeType from TYP_STRUCT to a "primitive" type // when we are returning a struct by value and it fits in one register // if (!hasRetBuffArg && varTypeIsStruct(info.compRetNativeType)) { CORINFO_CLASS_HANDLE retClsHnd = info.compMethodInfo->args.retTypeClass; Compiler::structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, info.compCallConv, &howToReturnStruct); // We can safely widen the return type for enclosed structs. if ((howToReturnStruct == SPK_PrimitiveType) || (howToReturnStruct == SPK_EnclosingType)) { assert(returnType != TYP_UNKNOWN); assert(returnType != TYP_STRUCT); info.compRetNativeType = returnType; // ToDo: Refactor this common code sequence into its own method as it is used 4+ times if ((returnType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } } // Do we have a RetBuffArg? if (hasRetBuffArg) { info.compArgsCount++; } else { info.compRetBuffArg = BAD_VAR_NUM; } /* There is a 'hidden' cookie pushed last when the calling convention is varargs */ if (info.compIsVarArgs) { info.compArgsCount++; } // Is there an extra parameter used to pass instantiation info to // shared generic methods and shared generic struct instance methods? if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compArgsCount++; } else { info.compTypeCtxtArg = BAD_VAR_NUM; } lvaCount = info.compLocalsCount = info.compArgsCount + info.compMethodInfo->locals.numArgs; info.compILlocalsCount = info.compILargsCount + info.compMethodInfo->locals.numArgs; /* Now allocate the variable descriptor table */ if (compIsForInlining()) { lvaTable = impInlineInfo->InlinerCompiler->lvaTable; lvaCount = impInlineInfo->InlinerCompiler->lvaCount; lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; // No more stuff needs to be done. return; } lvaTableCnt = lvaCount * 2; if (lvaTableCnt < 16) { lvaTableCnt = 16; } lvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(lvaTableCnt); size_t tableSize = lvaTableCnt * sizeof(*lvaTable); memset(lvaTable, 0, tableSize); for (unsigned i = 0; i < lvaTableCnt; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. 
} //------------------------------------------------------------------------- // Count the arguments and initialize the respective lvaTable[] entries // // First the implicit arguments //------------------------------------------------------------------------- InitVarDscInfo varDscInfo; #ifdef TARGET_X86 // x86 unmanaged calling conventions limit the number of registers supported // for accepting arguments. As a result, we need to modify the number of registers // when we emit a method with an unmanaged calling convention. switch (info.compCallConv) { case CorInfoCallConvExtension::Thiscall: // In thiscall the this parameter goes into a register. varDscInfo.Init(lvaTable, hasRetBuffArg, 1, 0); break; case CorInfoCallConvExtension::C: case CorInfoCallConvExtension::Stdcall: case CorInfoCallConvExtension::CMemberFunction: case CorInfoCallConvExtension::StdcallMemberFunction: varDscInfo.Init(lvaTable, hasRetBuffArg, 0, 0); break; case CorInfoCallConvExtension::Managed: case CorInfoCallConvExtension::Fastcall: case CorInfoCallConvExtension::FastcallMemberFunction: default: varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); break; } #else varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); #endif lvaInitArgs(&varDscInfo); //------------------------------------------------------------------------- // Finally the local variables //------------------------------------------------------------------------- unsigned varNum = varDscInfo.varNum; LclVarDsc* varDsc = varDscInfo.varDsc; CORINFO_ARG_LIST_HANDLE localsSig = info.compMethodInfo->locals.args; for (unsigned i = 0; i < info.compMethodInfo->locals.numArgs; i++, varNum++, varDsc++, localsSig = info.compCompHnd->getArgNext(localsSig)) { CORINFO_CLASS_HANDLE typeHnd; CorInfoTypeWithMod corInfoTypeWithMod = info.compCompHnd->getArgType(&info.compMethodInfo->locals, localsSig, &typeHnd); CorInfoType corInfoType = strip(corInfoTypeWithMod); lvaInitVarDsc(varDsc, varNum, corInfoType, typeHnd, localsSig, &info.compMethodInfo->locals); if ((corInfoTypeWithMod & CORINFO_TYPE_MOD_PINNED) != 0) { if ((corInfoType == CORINFO_TYPE_CLASS) || (corInfoType == CORINFO_TYPE_BYREF)) { JITDUMP("Setting lvPinned for V%02u\n", varNum); varDsc->lvPinned = 1; } else { JITDUMP("Ignoring pin for non-GC type V%02u\n", varNum); } } varDsc->lvOnFrame = true; // The final home for this local variable might be our local stack frame if (corInfoType == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->locals, localsSig); lvaSetClass(varNum, clsHnd); } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varNum); varDsc->lvHasLdAddrOp = 1; // todo: Why does it apply only to non-structs? // if (!varTypeIsStruct(varDsc) && !varTypeIsSIMD(varDsc)) { lvaSetVarAddrExposed(varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } } if ( // If there already exist unsafe buffers, don't mark more structs as unsafe // as that will cause them to be placed along with the real unsafe buffers, // unnecessarily exposing them to overruns. This can affect GS tests which // intentionally do buffer-overruns. 
        !getNeedsGSSecurityCookie() &&
        // GS checks require the stack to be re-ordered, which can't be done with EnC
        !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25))
    {
        setNeedsGSSecurityCookie();
        compGSReorderStackLayout = true;

        for (unsigned i = 0; i < lvaCount; i++)
        {
            if ((lvaTable[i].lvType == TYP_STRUCT) && compStressCompile(STRESS_GENERIC_VARN, 60))
            {
                lvaTable[i].lvIsUnsafeBuffer = true;
            }
        }
    }

    if (getNeedsGSSecurityCookie())
    {
        // Ensure that there will be at least one stack variable since
        // we require that the GSCookie does not have a 0 stack offset.
        unsigned   dummy         = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy"));
        LclVarDsc* gsCookieDummy = lvaGetDesc(dummy);
        gsCookieDummy->lvType    = TYP_INT;
        gsCookieDummy->lvIsTemp  = true; // It is not alive at all, set the flag to prevent zero-init.
        lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));
    }

    // Allocate the lvaOutgoingArgSpaceVar now because we can run into problems in the
    // emitter when the varNum is greater than 32767 (see emitLclVarAddr::initLclVarAddr)
    lvaAllocOutgoingArgSpaceVar();

#ifdef DEBUG
    if (verbose)
    {
        lvaTableDump(INITIAL_FRAME_LAYOUT);
    }
#endif
}

/*****************************************************************************/
void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo)
{
    compArgSize = 0;

#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
    // Pre-spill all argument regs to the stack on Arm when running under the profiler.
    if (compIsProfilerHookNeeded())
    {
        codeGen->regSet.rsMaskPreSpillRegArg |= RBM_ARG_REGS;
    }
#endif

    //----------------------------------------------------------------------

    /* Is there a "this" pointer ? */
    lvaInitThisPtr(varDscInfo);

    unsigned numUserArgsToSkip = 0;
    unsigned numUserArgs       = info.compMethodInfo->args.numArgs;
#if !defined(TARGET_ARM)
    if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv))
    {
        // If we are a native instance method, handle the first user arg
        // (the unmanaged this parameter) and then handle the hidden
        // return buffer parameter.
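        // Illustrative ordering (hypothetical unmanaged instance signature that
        // also returns a large struct): the incoming list is processed as
        // [unmanaged this][hidden return buffer][remaining user args], which is
        // why exactly one user arg is peeled off before the return buffer below.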
assert(numUserArgs >= 1); lvaInitUserArgs(varDscInfo, 0, 1); numUserArgsToSkip++; numUserArgs--; lvaInitRetBuffArg(varDscInfo, false); } else #endif { /* If we have a hidden return-buffer parameter, that comes here */ lvaInitRetBuffArg(varDscInfo, true); } //====================================================================== #if USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //------------------------------------------------------------------------- // Now walk the function signature for the explicit user arguments //------------------------------------------------------------------------- lvaInitUserArgs(varDscInfo, numUserArgsToSkip, numUserArgs); #if !USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //---------------------------------------------------------------------- // We have set info.compArgsCount in compCompile() noway_assert(varDscInfo->varNum == info.compArgsCount); assert(varDscInfo->intRegArgNum <= MAX_REG_ARG); codeGen->intRegState.rsCalleeRegArgCount = varDscInfo->intRegArgNum; codeGen->floatRegState.rsCalleeRegArgCount = varDscInfo->floatRegArgNum; #if FEATURE_FASTTAILCALL // Save the stack usage information // We can get register usage information using codeGen->intRegState and // codeGen->floatRegState info.compArgStackSize = varDscInfo->stackArgSize; #endif // FEATURE_FASTTAILCALL // The total argument size must be aligned. noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0); #ifdef TARGET_X86 /* We can not pass more than 2^16 dwords as arguments as the "ret" instruction can only pop 2^16 arguments. 
Could be handled correctly but it will be very difficult for fully interruptible code */ if (compArgSize != (size_t)(unsigned short)compArgSize) IMPL_LIMITATION("Too many arguments for the \"ret\" instruction to pop"); #endif } /*****************************************************************************/ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo) { LclVarDsc* varDsc = varDscInfo->varDsc; if (!info.compIsStatic) { varDsc->lvIsParam = 1; varDsc->lvIsPtr = 1; lvaArg0Var = info.compThisArg = varDscInfo->varNum; noway_assert(info.compThisArg == 0); if (eeIsValueClass(info.compClassHnd)) { varDsc->lvType = TYP_BYREF; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types type = impNormStructType(info.compClassHnd, &simdBaseJitType); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(type)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); varDsc->lvExactSize = genTypeSize(type); } } #endif // FEATURE_SIMD } else { varDsc->lvType = TYP_REF; lvaSetClass(varDscInfo->varNum, info.compClassHnd); } varDsc->lvVerTypeInfo = typeInfo(); // Mark the 'this' pointer for the method varDsc->lvVerTypeInfo.SetIsThisPtr(); varDsc->lvIsRegArg = 1; noway_assert(varDscInfo->intRegArgNum == 0); varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->allocRegArg(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef DEBUG if (verbose) { printf("'this' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } /*****************************************************************************/ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg) { LclVarDsc* varDsc = varDscInfo->varDsc; bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // These two should always match noway_assert(hasRetBuffArg == varDscInfo->hasRetBufArg); if (hasRetBuffArg) { info.compRetBuffArg = varDscInfo->varNum; varDsc->lvType = TYP_BYREF; varDsc->lvIsParam = 1; varDsc->lvIsRegArg = 0; if (useFixedRetBufReg && hasFixedRetBuffReg()) { varDsc->lvIsRegArg = 1; varDsc->SetArgReg(theFixedRetBuffReg()); } else if (varDscInfo->canEnreg(TYP_INT)) { varDsc->lvIsRegArg = 1; unsigned retBuffArgNum = varDscInfo->allocRegArg(TYP_INT); varDsc->SetArgReg(genMapIntRegArgNumToRegNum(retBuffArgNum)); } #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsSIMD(info.compRetType)) { varDsc->lvSIMDType = true; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(info.compMethodInfo->args.retTypeClass, &varDsc->lvExactSize); varDsc->SetSimdBaseJitType(simdBaseJitType); assert(varDsc->GetSimdBaseType() != TYP_UNKNOWN); } #endif // FEATURE_SIMD assert(!varDsc->lvIsRegArg || isValidIntArgReg(varDsc->GetArgReg())); #ifdef DEBUG if (varDsc->lvIsRegArg && verbose) { printf("'__retBuf' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif /* Update the total argument size, count and varDsc */ compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } //----------------------------------------------------------------------------- // lvaInitUserArgs: // 
Initialize local var descriptions for incoming user arguments // // Arguments: // varDscInfo - the local var descriptions // skipArgs - the number of user args to skip processing. // takeArgs - the number of user args to process (after skipping skipArgs number of args) // void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs) { //------------------------------------------------------------------------- // Walk the function signature for the explicit arguments //------------------------------------------------------------------------- #if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs if (info.compIsVarArgs) { varDscInfo->maxIntRegArgNum = varDscInfo->intRegArgNum; } #elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On System V type environment the float registers are not indexed together with the int ones. varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum; #endif // TARGET* CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; const unsigned argSigLen = info.compMethodInfo->args.numArgs; // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs)); // If there are no user args or less than skipArgs args, return here since there's no work to do. if (numUserArgs <= 0) { return; } #ifdef TARGET_ARM regMaskTP doubleAlignMask = RBM_NONE; #endif // TARGET_ARM // Skip skipArgs arguments from the signature. for (unsigned i = 0; i < skipArgs; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } // Process each user arg. for (unsigned i = 0; i < numUserArgs; i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst)) { LclVarDsc* varDsc = varDscInfo->varDsc; CORINFO_CLASS_HANDLE typeHnd = nullptr; CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(&info.compMethodInfo->args, argLst, &typeHnd); varDsc->lvIsParam = 1; lvaInitVarDsc(varDsc, varDscInfo->varNum, strip(corInfoType), typeHnd, argLst, &info.compMethodInfo->args); if (strip(corInfoType) == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->args, argLst); lvaSetClass(varDscInfo->varNum, clsHnd); } // For ARM, ARM64, and AMD64 varargs, all arguments go in integer registers var_types argType = mangleVarArgsType(varDsc->TypeGet()); var_types origArgType = argType; // ARM softfp calling convention should affect only the floating point arguments. // Otherwise there appear too many surplus pre-spills and other memory operations // with the associated locations . bool isSoftFPPreSpill = opts.compUseSoftFP && varTypeIsFloating(varDsc->TypeGet()); unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); unsigned cSlots = (argSize + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; // the total number of slots of this argument bool isHfaArg = false; var_types hfaType = TYP_UNDEF; // Methods that use VarArg or SoftFP cannot have HFA arguments except // Native varargs on arm64 unix use the regular calling convention. 
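        // Illustrative HFA example (not specific to this method): on arm64 a
        // struct like { float x; float y; float z; } is an HFA of three floats,
        // so GetHfaType below would yield TYP_FLOAT and cSlots becomes the
        // field count (3) rather than the pointer-sized slot count.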
        if (((TargetOS::IsUnix && TargetArchitecture::IsArm64) || !info.compIsVarArgs) && !opts.compUseSoftFP)
        {
            // If the argType is a struct, then check if it is an HFA
            if (varTypeIsStruct(argType))
            {
                // hfaType is set to float, double, or SIMD type if it is an HFA, otherwise TYP_UNDEF
                hfaType  = GetHfaType(typeHnd);
                isHfaArg = varTypeIsValidHfaType(hfaType);
            }
        }
        else if (info.compIsVarArgs)
        {
            // Currently native varargs is not implemented on non-Windows targets.
            //
            // Note that some targets like Arm64 Unix should not need much work as
            // the ABI is the same. While other targets may only need small changes
            // such as amd64 Unix, which just expects RAX to pass numFPArguments.
            if (TargetOS::IsUnix)
            {
                NYI("InitUserArgs for Vararg callee is not yet implemented on non Windows targets.");
            }
        }

        if (isHfaArg)
        {
            // We have an HFA argument, so from here on out treat the type as a float, double, or vector.
            // The original struct type is available by using origArgType.
            // We also update the cSlots to be the number of float/double/vector fields in the HFA.
            argType = hfaType; // TODO-Cleanup: remove this assignment and mark `argType` as const.
            varDsc->SetHfaType(hfaType);
            cSlots = varDsc->lvHfaSlots();
        }

        // The number of slots that must be enregistered if we are to consider this argument enregistered.
        // This is normally the same as cSlots, since we normally either enregister the entire object,
        // or none of it. For structs on ARM, however, we only need to enregister a single slot to consider
        // it enregistered, as long as we can split the rest onto the stack.
        unsigned cSlotsToEnregister = cSlots;

#if defined(TARGET_ARM64)

        if (compFeatureArgSplit())
        {
            // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte
            // struct is split between register r7 and virtual stack slot s[0]
            // We will only do this for calls to vararg methods on Windows Arm64
            //
            // !!This does not affect the normal arm64 calling convention or Unix Arm64!!
            if (this->info.compIsVarArgs && argType == TYP_STRUCT)
            {
                if (varDscInfo->canEnreg(TYP_INT, 1) &&     // The beginning of the struct can go in a register
                    !varDscInfo->canEnreg(TYP_INT, cSlots)) // The end of the struct can't fit in a register
                {
                    cSlotsToEnregister = 1; // Force the split
                }
            }
        }

#endif // defined(TARGET_ARM64)

#ifdef TARGET_ARM
        // On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers.
        // But we pre-spill user arguments in varargs methods and structs.
        //
        unsigned cAlign;
        bool     preSpill = info.compIsVarArgs || isSoftFPPreSpill;

        switch (origArgType)
        {
            case TYP_STRUCT:
                assert(varDsc->lvSize() == argSize);
                cAlign = varDsc->lvStructDoubleAlign ? 2 : 1;

                // HFA arguments go on the stack frame. They don't get spilled in the prolog like struct
                // arguments passed in the integer registers but get homed immediately after the prolog.
                if (!isHfaArg)
                {
                    // TODO-Arm32-Windows: vararg struct should be forced to split like
                    // ARM64 above.
                    cSlotsToEnregister = 1; // HFAs must be totally enregistered or not, but other structs can be split.
                    preSpill           = true;
                }
                break;

            case TYP_DOUBLE:
            case TYP_LONG:
                cAlign = 2;
                break;

            default:
                cAlign = 1;
                break;
        }

        if (isRegParamType(argType))
        {
            compArgSize += varDscInfo->alignReg(argType, cAlign) * REGSIZE_BYTES;
        }

        if (argType == TYP_STRUCT)
        {
            // Are we going to split the struct between registers and stack? We can do that as long as
            // no floating-point arguments have been put on the stack.
// // From the ARM Procedure Call Standard: // Rule C.5: "If the NCRN is less than r4 **and** the NSAA is equal to the SP," // then split the argument between registers and stack. Implication: if something // has already been spilled to the stack, then anything that would normally be // split between the core registers and the stack will be put on the stack. // Anything that follows will also be on the stack. However, if something from // floating point regs has been spilled to the stack, we can still use r0-r3 until they are full. if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register !varDscInfo->canEnreg(TYP_INT, cSlots) && // The end of the struct can't fit in a register varDscInfo->existAnyFloatStackArgs()) // There's at least one stack-based FP arg already { varDscInfo->setAllRegArgUsed(TYP_INT); // Prevent all future use of integer registers preSpill = false; // This struct won't be prespilled, since it will go on the stack } } if (preSpill) { for (unsigned ix = 0; ix < cSlots; ix++) { if (!varDscInfo->canEnreg(TYP_INT, ix + 1)) { break; } regMaskTP regMask = genMapArgNumToRegMask(varDscInfo->regArgNum(TYP_INT) + ix, TYP_INT); if (cAlign == 2) { doubleAlignMask |= regMask; } codeGen->regSet.rsMaskPreSpillRegArg |= regMask; } } #else // !TARGET_ARM #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; if (varTypeIsStruct(argType)) { assert(typeHnd != nullptr); eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); if (structDesc.passedInRegisters) { unsigned intRegCount = 0; unsigned floatRegCount = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { intRegCount++; } else if (structDesc.IsSseSlot(i)) { floatRegCount++; } else { assert(false && "Invalid eightbyte classification type."); break; } } if (intRegCount != 0 && !varDscInfo->canEnreg(TYP_INT, intRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } if (floatRegCount != 0 && !varDscInfo->canEnreg(TYP_FLOAT, floatRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } } } #endif // UNIX_AMD64_ABI #endif // !TARGET_ARM // The final home for this incoming register might be our local stack frame. // For System V platforms the final home will always be on the local stack frame. varDsc->lvOnFrame = true; bool canPassArgInRegisters = false; #if defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { canPassArgInRegisters = structDesc.passedInRegisters; } else #elif defined(TARGET_X86) if (varTypeIsStruct(argType) && isTrivialPointerSizedStruct(typeHnd)) { canPassArgInRegisters = varDscInfo->canEnreg(TYP_I_IMPL, cSlotsToEnregister); } else #endif // defined(UNIX_AMD64_ABI) { canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister); } if (canPassArgInRegisters) { /* Another register argument */ // Allocate the registers we need. allocRegArg() returns the first argument register number of the set. // For non-HFA structs, we still "try" to enregister the whole thing; it will just max out if splitting // to the stack happens. 
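            // Worked example (illustrative arm32 numbers): for a 12-byte struct
            // with cSlots == 3 when only two integer argument registers remain,
            // allocRegArg below maxes out the integer registers and the third
            // slot is carried on the stack by the split handling described above.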
unsigned firstAllocatedRegArgNum = 0; #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS #if defined(UNIX_AMD64_ABI) unsigned secondAllocatedRegArgNum = 0; var_types firstEightByteType = TYP_UNDEF; var_types secondEightByteType = TYP_UNDEF; if (varTypeIsStruct(argType)) { if (structDesc.eightByteCount >= 1) { firstEightByteType = GetEightByteType(structDesc, 0); firstAllocatedRegArgNum = varDscInfo->allocRegArg(firstEightByteType, 1); } } else #endif // defined(UNIX_AMD64_ABI) { firstAllocatedRegArgNum = varDscInfo->allocRegArg(argType, cSlots); } if (isHfaArg) { // We need to save the fact that this HFA is enregistered // Note that we can have HVAs of SIMD types even if we are not recognizing intrinsics. // In that case, we won't have normalized the vector types on the varDsc, so if we have a single vector // register, we need to set the type now. Otherwise, later we'll assume this is passed by reference. if (varDsc->lvHfaSlots() != 1) { varDsc->lvIsMultiRegArg = true; } } varDsc->lvIsRegArg = 1; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 if (argType == TYP_STRUCT) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); if (cSlots == 2) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_I_IMPL)); varDsc->lvIsMultiRegArg = true; } } #elif defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType)); // If there is a second eightbyte, get a register for it too and map the arg to the reg number. if (structDesc.eightByteCount >= 2) { secondEightByteType = GetEightByteType(structDesc, 1); secondAllocatedRegArgNum = varDscInfo->allocRegArg(secondEightByteType, 1); varDsc->lvIsMultiRegArg = true; } if (secondEightByteType != TYP_UNDEF) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType)); } } #else // ARM32 if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); } #endif // ARM32 else #endif // FEATURE_MULTIREG_ARGS { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, argType)); } #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_LONG) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_INT)); } #if FEATURE_FASTTAILCALL // Check if arg was split between registers and stack. 
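            // Numeric sketch of the split bookkeeping below (illustrative arm32
            // values): with firstRegArgNum == 2, cSlots == 4 and
            // maxIntRegArgNum == 4, lastRegArgNum == 5, so numEnregistered == 2,
            // the stack offset becomes -2 * REGSIZE_BYTES, and stackArgSize
            // grows by the remaining 2 * REGSIZE_BYTES.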
            if (!varTypeUsesFloatReg(argType))
            {
                unsigned firstRegArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg());
                unsigned lastRegArgNum  = firstRegArgNum + cSlots - 1;
                if (lastRegArgNum >= varDscInfo->maxIntRegArgNum)
                {
                    assert(varDscInfo->stackArgSize == 0);
                    unsigned numEnregistered = varDscInfo->maxIntRegArgNum - firstRegArgNum;
                    varDsc->SetStackOffset(-(int)numEnregistered * REGSIZE_BYTES);
                    varDscInfo->stackArgSize += (cSlots - numEnregistered) * REGSIZE_BYTES;
                    JITDUMP("set user arg V%02u offset to %d\n", varDscInfo->varNum, varDsc->GetStackOffset());
                }
            }
#endif
#endif // TARGET_ARM

#ifdef DEBUG
            if (verbose)
            {
                printf("Arg #%u passed in register(s) ", varDscInfo->varNum);

#if defined(UNIX_AMD64_ABI)
                if (varTypeIsStruct(argType))
                {
                    // Print both registers, just to be clear
                    if (firstEightByteType == TYP_UNDEF)
                    {
                        printf("firstEightByte: <not used>");
                    }
                    else
                    {
                        printf("firstEightByte: %s",
                               getRegName(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType)));
                    }

                    if (secondEightByteType == TYP_UNDEF)
                    {
                        printf(", secondEightByte: <not used>");
                    }
                    else
                    {
                        printf(", secondEightByte: %s",
                               getRegName(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType)));
                    }
                }
                else
#endif // defined(UNIX_AMD64_ABI)
                {
                    bool     isFloat   = varTypeUsesFloatReg(argType);
                    unsigned regArgNum = genMapRegNumToRegArgNum(varDsc->GetArgReg(), argType);

                    for (unsigned ix = 0; ix < cSlots; ix++, regArgNum++)
                    {
                        if (ix > 0)
                        {
                            printf(",");
                        }

                        if (!isFloat && (regArgNum >= varDscInfo->maxIntRegArgNum)) // a struct has been split between
                                                                                    // registers and stack
                        {
                            printf(" stack slots:%d", cSlots - ix);
                            break;
                        }

#ifdef TARGET_ARM
                        if (isFloat)
                        {
                            // Print register size prefix
                            if (argType == TYP_DOUBLE)
                            {
                                // Print both registers, just to be clear
                                printf("%s/%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)),
                                       getRegName(genMapRegArgNumToRegNum(regArgNum + 1, argType)));

                                // doubles take 2 slots
                                assert(ix + 1 < cSlots);
                                ++ix;
                                ++regArgNum;
                            }
                            else
                            {
                                printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)));
                            }
                        }
                        else
#endif // TARGET_ARM
                        {
                            printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)));
                        }
                    }
                }
                printf("\n");
            }
#endif    // DEBUG
        } // end if (canPassArgInRegisters)
        else
        {
#if defined(TARGET_ARM)
            varDscInfo->setAllRegArgUsed(argType);

            if (varTypeUsesFloatReg(argType))
            {
                varDscInfo->setAnyFloatStackArgs();
            }

#elif defined(TARGET_ARM64)

            // If we needed to use the stack in order to pass this argument then
            // record the fact that we have used up any remaining registers of this 'type'
            // This prevents any 'backfilling' from occurring on ARM64
            //
            varDscInfo->setAllRegArgUsed(argType);

#endif // TARGET_XXX

#if FEATURE_FASTTAILCALL
#ifdef TARGET_ARM
            unsigned argAlignment = cAlign * TARGET_POINTER_SIZE;
#else
            unsigned argAlignment = eeGetArgSizeAlignment(origArgType, (hfaType == TYP_FLOAT));
            // We expect the following rounding operation to be a noop on all
            // ABIs except ARM (where we have 8-byte aligned args) and macOS
            // ARM64 (which allows packing multiple smaller parameters in a
            // single stack slot).
            assert(compMacOsArm64Abi() || ((varDscInfo->stackArgSize % argAlignment) == 0));
#endif
            varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment);
            JITDUMP("set user arg V%02u offset to %u\n", varDscInfo->varNum, varDscInfo->stackArgSize);
            varDsc->SetStackOffset(varDscInfo->stackArgSize);
            varDscInfo->stackArgSize += argSize;
#endif // FEATURE_FASTTAILCALL
        }

#ifdef UNIX_AMD64_ABI
        // The arg size is returning the number of bytes of the argument.
For a struct it could return a size not a // multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE, // so round it up. compArgSize += roundUp(argSize, TARGET_POINTER_SIZE); #else // !UNIX_AMD64_ABI compArgSize += argSize; #endif // !UNIX_AMD64_ABI if (info.compIsVarArgs || isSoftFPPreSpill) { #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); #else // !TARGET_X86 // TODO-CQ: We shouldn't have to go as far as to declare these // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varDscInfo->varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varDscInfo->varNum); varDsc->lvHasLdAddrOp = 1; lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } compArgSize = GetOutgoingArgByteSize(compArgSize); #ifdef TARGET_ARM if (doubleAlignMask != RBM_NONE) { assert(RBM_ARG_REGS == 0xF); assert((doubleAlignMask & RBM_ARG_REGS) == doubleAlignMask); if (doubleAlignMask != RBM_NONE && doubleAlignMask != RBM_ARG_REGS) { // 'double aligned types' can begin only at r0 or r2 and we always expect at least two registers to be used // Note that in rare cases, we can have double-aligned structs of 12 bytes (if specified explicitly with // attributes) assert((doubleAlignMask == 0b0011) || (doubleAlignMask == 0b1100) || (doubleAlignMask == 0b0111) /* || 0b1111 is if'ed out */); // Now if doubleAlignMask is xyz1 i.e., the struct starts in r0, and we prespill r2 or r3 // but not both, then the stack would be misaligned for r0. So spill both // r2 and r3. // // ; +0 --- caller SP double aligned ---- // ; -4 r2 r3 // ; -8 r1 r1 // ; -c r0 r0 <-- misaligned. // ; callee saved regs bool startsAtR0 = (doubleAlignMask & 1) == 1; bool r2XorR3 = ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R2) == 0) != ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R3) == 0); if (startsAtR0 && r2XorR3) { codeGen->regSet.rsMaskPreSpillAlign = (~codeGen->regSet.rsMaskPreSpillRegArg & ~doubleAlignMask) & RBM_ARG_REGS; } } } #endif // TARGET_ARM } /*****************************************************************************/ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo) { //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compTypeCtxtArg = varDscInfo->varNum; LclVarDsc* varDsc = varDscInfo->varDsc; varDsc->lvIsParam = 1; varDsc->lvType = TYP_I_IMPL; if (varDscInfo->canEnreg(TYP_I_IMPL)) { /* Another register argument */ varDsc->lvIsRegArg = 1; varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->regArgNum(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame varDscInfo->intRegArgNum++; #ifdef DEBUG if (verbose) { printf("'GenCtxt' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif } else { // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg // returns false. 
            varDsc->lvOnFrame = true;
#if FEATURE_FASTTAILCALL
            varDsc->SetStackOffset(varDscInfo->stackArgSize);
            varDscInfo->stackArgSize += TARGET_POINTER_SIZE;
#endif // FEATURE_FASTTAILCALL
        }

        compArgSize += TARGET_POINTER_SIZE;

#if defined(TARGET_X86)
        if (info.compIsVarArgs)
            varDsc->SetStackOffset(compArgSize);
#endif // TARGET_X86

        varDscInfo->varNum++;
        varDscInfo->varDsc++;
    }
}

/*****************************************************************************/
void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo)
{
    if (info.compIsVarArgs)
    {
        lvaVarargsHandleArg = varDscInfo->varNum;

        LclVarDsc* varDsc = varDscInfo->varDsc;
        varDsc->lvType    = TYP_I_IMPL;
        varDsc->lvIsParam = 1;
#if defined(TARGET_X86)
        // Codegen will need it for x86 scope info.
        varDsc->lvImplicitlyReferenced = 1;
#endif // TARGET_X86
        lvaSetVarDoNotEnregister(lvaVarargsHandleArg DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));

        assert(mostRecentlyActivePhase == PHASE_PRE_IMPORT);

        // TODO-Cleanup: this is preImportation phase, why do we try to work with regs here?
        // Should it be just deleted?
        if (varDscInfo->canEnreg(TYP_I_IMPL))
        {
            /* Another register argument */

            unsigned varArgHndArgNum = varDscInfo->allocRegArg(TYP_I_IMPL);

            varDsc->lvIsRegArg = 1;
            varDsc->SetArgReg(genMapRegArgNumToRegNum(varArgHndArgNum, TYP_I_IMPL));
#if FEATURE_MULTIREG_ARGS
            varDsc->SetOtherArgReg(REG_NA);
#endif
            varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame

#ifdef TARGET_ARM
            // This has to be spilled right in front of the real arguments and we have
            // to pre-spill all the argument registers explicitly because we only
            // have symbols for the declared ones, not any potential variadic ones.
            for (unsigned ix = varArgHndArgNum; ix < ArrLen(intArgMasks); ix++)
            {
                codeGen->regSet.rsMaskPreSpillRegArg |= intArgMasks[ix];
            }
#endif // TARGET_ARM

#ifdef DEBUG
            if (verbose)
            {
                printf("'VarArgHnd' passed in register %s\n", getRegName(varDsc->GetArgReg()));
            }
#endif // DEBUG
        }
        else
        {
            // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg
            // returns false.
            varDsc->lvOnFrame = true;
#if FEATURE_FASTTAILCALL
            varDsc->SetStackOffset(varDscInfo->stackArgSize);
            varDscInfo->stackArgSize += TARGET_POINTER_SIZE;
#endif // FEATURE_FASTTAILCALL
        }

        /* Update the total argument size, count and varDsc */

        compArgSize += TARGET_POINTER_SIZE;

        varDscInfo->varNum++;
        varDscInfo->varDsc++;

#if defined(TARGET_X86)
        varDsc->SetStackOffset(compArgSize);

        // Allocate a temp to point at the beginning of the args

        lvaVarargsBaseOfStkArgs                  = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs"));
        lvaTable[lvaVarargsBaseOfStkArgs].lvType = TYP_I_IMPL;

#endif // TARGET_X86
    }
}

/*****************************************************************************/
void Compiler::lvaInitVarDsc(LclVarDsc*              varDsc,
                             unsigned                varNum,
                             CorInfoType             corInfoType,
                             CORINFO_CLASS_HANDLE    typeHnd,
                             CORINFO_ARG_LIST_HANDLE varList,
                             CORINFO_SIG_INFO*       varSig)
{
    noway_assert(varDsc == lvaGetDesc(varNum));

    switch (corInfoType)
    {
        // Mark types that look like pointers for doing shadow-copying of
        // parameters if we have an unsafe buffer.
        // Note that this does not handle structs with pointer fields. Instead,
        // we rely on using the assign-groups/equivalence-groups in
        // gsFindVulnerableParams() to determine if a buffer-struct contains a
        // pointer. We could do better by having the EE determine this for us.
        // Note that we want to keep buffers without pointers at lower memory
        // addresses than buffers with pointers.
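        // For example (illustrative): a parameter declared in IL as object or
        // string arrives here as CORINFO_TYPE_CLASS or CORINFO_TYPE_STRING and
        // so gets lvIsPtr set, which feeds the ordering decision described above.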
case CORINFO_TYPE_PTR: case CORINFO_TYPE_BYREF: case CORINFO_TYPE_CLASS: case CORINFO_TYPE_STRING: case CORINFO_TYPE_VAR: case CORINFO_TYPE_REFANY: varDsc->lvIsPtr = 1; break; default: break; } var_types type = JITtype2varType(corInfoType); if (varTypeIsFloating(type)) { compFloatingPointUsed = true; } if (typeHnd) { unsigned cFlags = info.compCompHnd->getClassAttribs(typeHnd); // We can get typeHnds for primitive types, these are value types which only contain // a primitive. We will need the typeHnd to distinguish them, so we store it here. if ((cFlags & CORINFO_FLG_VALUECLASS) && !varTypeIsStruct(type)) { // printf("This is a struct that the JIT will treat as a primitive\n"); varDsc->lvVerTypeInfo = verMakeTypeInfo(typeHnd); } varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) varDsc->lvIsImplicitByRef = 0; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) // Set the lvType (before this point it is TYP_UNDEF). if (GlobalJitOptions::compFeatureHfa) { varDsc->SetHfaType(TYP_UNDEF); } if ((varTypeIsStruct(type))) { lvaSetStruct(varNum, typeHnd, typeHnd != nullptr, true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(varNum); } } else { varDsc->lvType = type; } if (type == TYP_BOOL) { varDsc->lvIsBoolean = true; } #ifdef DEBUG varDsc->SetStackOffset(BAD_STK_OFFS); #endif #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS } /***************************************************************************** * Returns our internal varNum for a given IL variable. * Asserts assume it is called after lvaTable[] has been set up. */ unsigned Compiler::compMapILvarNum(unsigned ILvarNum) { noway_assert(ILvarNum < info.compILlocalsCount || ILvarNum > unsigned(ICorDebugInfo::UNKNOWN_ILNUM)); unsigned varNum; if (ILvarNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM) { // The varargs cookie is the last argument in lvaTable[] noway_assert(info.compIsVarArgs); varNum = lvaVarargsHandleArg; noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM) { noway_assert(info.compRetBuffArg != BAD_VAR_NUM); varNum = info.compRetBuffArg; } else if (ILvarNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM) { noway_assert(info.compTypeCtxtArg >= 0); varNum = unsigned(info.compTypeCtxtArg); } else if (ILvarNum < info.compILargsCount) { // Parameter varNum = compMapILargNum(ILvarNum); noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum < info.compILlocalsCount) { // Local variable unsigned lclNum = ILvarNum - info.compILargsCount; varNum = info.compArgsCount + lclNum; noway_assert(!lvaTable[varNum].lvIsParam); } else { unreached(); } noway_assert(varNum < info.compLocalsCount); return varNum; } /***************************************************************************** * Returns the IL variable number given our internal varNum. * Special return values are VARG_ILNUM, RETBUF_ILNUM, TYPECTXT_ILNUM. * * Returns UNKNOWN_ILNUM if it can't be mapped. */ unsigned Compiler::compMap2ILvarNum(unsigned varNum) const { if (compIsForInlining()) { return impInlineInfo->InlinerCompiler->compMap2ILvarNum(varNum); } noway_assert(varNum < lvaCount); if (varNum == info.compRetBuffArg) { return (unsigned)ICorDebugInfo::RETBUF_ILNUM; } // Is this a varargs function? if (info.compIsVarArgs && varNum == lvaVarargsHandleArg) { return (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM; } // We create an extra argument for the type context parameter // needed for shared generic code. 
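    // Illustrative walk-through (hypothetical numbering): in a shared generic
    // instance method whose type context lives at varNum 1, a user argument at
    // varNum 3 is decremented once below (and possibly again for a varargs
    // cookie or hidden return buffer) to recover its IL argument number.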
    if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) &&
        varNum == (unsigned)info.compTypeCtxtArg)
    {
        return (unsigned)ICorDebugInfo::TYPECTXT_ILNUM;
    }

#if FEATURE_FIXED_OUT_ARGS
    if (varNum == lvaOutgoingArgSpaceVar)
    {
        return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped
    }
#endif // FEATURE_FIXED_OUT_ARGS

    // Now mutate varNum to remove extra parameters from the count.
    if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) &&
        varNum > (unsigned)info.compTypeCtxtArg)
    {
        varNum--;
    }

    if (info.compIsVarArgs && varNum > lvaVarargsHandleArg)
    {
        varNum--;
    }

    /* Is there a hidden argument for the return buffer.
       Note that this code works because if the RetBuffArg is not present,
       compRetBuffArg will be BAD_VAR_NUM */
    if (info.compRetBuffArg != BAD_VAR_NUM && varNum > info.compRetBuffArg)
    {
        varNum--;
    }

    if (varNum >= info.compLocalsCount)
    {
        return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped
    }

    return varNum;
}

/*****************************************************************************
 * Returns true if variable "varNum" may be address-exposed.
 */

bool Compiler::lvaVarAddrExposed(unsigned varNum) const
{
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    return varDsc->IsAddressExposed();
}

/*****************************************************************************
 * Returns true iff variable "varNum" should not be enregistered (for one of several reasons).
 */

bool Compiler::lvaVarDoNotEnregister(unsigned varNum)
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);
    return varDsc->lvDoNotEnregister;
}

//------------------------------------------------------------------------
// lvSetMinOptsDoNotEnreg: a helper to initialize the `lvDoNotEnregister` flag
//    for locals that were created before the compiler decided its optimization level.
//
// Assumptions:
//    compEnregLocals() value is finalized and is set to false.
//
void Compiler::lvSetMinOptsDoNotEnreg()
{
    JITDUMP("compEnregLocals() is false, setting doNotEnreg flag for all locals.");
    assert(!compEnregLocals());
    for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
    {
        lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars));
    }
}

/*****************************************************************************
 * Returns the handle to the class of the local variable varNum
 */

CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum)
{
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    return varDsc->GetStructHnd();
}

//--------------------------------------------------------------------------------------------
// lvaFieldOffsetCmp - a static compare function passed to jitstd::sort() by Compiler::StructPromotionHelper;
//   compares fields' offsets.
//
// Arguments:
//   field1 - pointer to the first field;
//   field2 - pointer to the second field.
//
// Return value:
//   true if the first field has a smaller offset than the second, false otherwise
//   (a strict-weak-ordering predicate, not a three-way comparison).
//
bool Compiler::lvaFieldOffsetCmp::operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2)
{
    return field1.fldOffset < field2.fldOffset;
}

//------------------------------------------------------------------------
// StructPromotionHelper constructor.
//
// Arguments:
//   compiler - pointer to a compiler to get access to an allocator, compHandle etc.
// Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) : compiler(compiler) , structPromotionInfo() #ifdef DEBUG , retypedFieldsMap(compiler->getAllocator(CMK_DebugOnly)) #endif // DEBUG { } //-------------------------------------------------------------------------------------------- // TryPromoteStructVar - promote struct var if it is possible and profitable. // // Arguments: // lclNum - struct number to try. // // Return value: // true if the struct var was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructVar(unsigned lclNum) { if (CanPromoteStructVar(lclNum)) { #if 0 // Often-useful debugging code: if you've narrowed down a struct-promotion problem to a single // method, this allows you to select a subset of the vars to promote (by 1-based ordinal number). static int structPromoVarNum = 0; structPromoVarNum++; if (atoi(getenv("structpromovarnumlo")) <= structPromoVarNum && structPromoVarNum <= atoi(getenv("structpromovarnumhi"))) #endif // 0 if (ShouldPromoteStructVar(lclNum)) { PromoteStructVar(lclNum); return true; } } return false; } #ifdef DEBUG //-------------------------------------------------------------------------------------------- // CheckRetypedAsScalar - check that the fldType for this fieldHnd was retyped as the requested type. // // Arguments: // fieldHnd - the field handle; // requestedType - as which type the field was accessed; // // Notes: // For example, it can happen when a struct such as A { struct B { long c } } is compiled and we access A.B.c: // it could look like "GT_FIELD struct B.c -> ADDR -> GT_FIELD struct A.B -> ADDR -> LCL_VAR A" , but // "GT_FIELD struct A.B -> ADDR -> LCL_VAR A" can be promoted to "LCL_VAR long A.B" and then // there is a type mismatch between "GT_FIELD struct B.c" and "LCL_VAR long A.B". // void Compiler::StructPromotionHelper::CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType) { assert(retypedFieldsMap.Lookup(fieldHnd)); assert(retypedFieldsMap[fieldHnd] == requestedType); } #endif // DEBUG //-------------------------------------------------------------------------------------------- // CanPromoteStructType - checks if the struct type can be promoted. // // Arguments: // typeHnd - struct handle to check. // // Return value: // true if the struct type can be promoted. // // Notes: // The last analyzed type is memorized to skip the check if we ask about the same type again. // However, it was not found profitable to memorize all analyzed types in a map. // // The check initializes only the necessary fields in lvaStructPromotionInfo, // so if the promotion is rejected early, most fields will be left uninitialized. // bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd) { assert(typeHnd != nullptr); if (!compiler->eeIsValueClass(typeHnd)) { // TODO-ObjectStackAllocation: Enable promotion of fields of stack-allocated objects. return false; } if (structPromotionInfo.typeHnd == typeHnd) { // Asking for the same type of struct as the last time. // Nothing needs to be done. // Fall through ... return structPromotionInfo.canPromote; } // Analyze this type from scratch. structPromotionInfo = lvaStructPromotionInfo(typeHnd); // sizeof(double) represents the size of the largest primitive type that we can struct promote. // In the future this may be changing to XMM_REGSIZE_BYTES. // Note: MaxOffset is used below to declare a local array, and therefore must be a compile-time constant.
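// Illustrative arithmetic (hedged; the constants are target-dependent): assuming // MAX_NumOfFieldsInPromotableStruct == 4 and YMM_REGSIZE_BYTES == 32, the XARCH // MaxOffset below is 4 * 32 == 128, so a 136-byte struct fails the structSize check.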
CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_SIMD) #if defined(TARGET_XARCH) // This will allow promotion of 4 Vector<T> fields on AVX2 or Vector256<T> on AVX, // or 8 Vector<T>/Vector128<T> fields on SSE2. const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * YMM_REGSIZE_BYTES; #elif defined(TARGET_ARM64) const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * FP_REGSIZE_BYTES; #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) #else // !FEATURE_SIMD const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double); #endif // !FEATURE_SIMD assert((BYTE)MaxOffset == MaxOffset); // because lvaStructFieldInfo.fldOffset is byte-sized assert((BYTE)MAX_NumOfFieldsInPromotableStruct == MAX_NumOfFieldsInPromotableStruct); // because lvaStructFieldInfo.fieldCnt is byte-sized bool containsGCpointers = false; COMP_HANDLE compHandle = compiler->info.compCompHnd; unsigned structSize = compHandle->getClassSize(typeHnd); if (structSize > MaxOffset) { return false; // struct is too large } unsigned fieldCnt = compHandle->getClassNumInstanceFields(typeHnd); if (fieldCnt == 0 || fieldCnt > MAX_NumOfFieldsInPromotableStruct) { return false; // struct must have between 1 and MAX_NumOfFieldsInPromotableStruct fields } structPromotionInfo.fieldCnt = (unsigned char)fieldCnt; DWORD typeFlags = compHandle->getClassAttribs(typeHnd); bool overlappingFields = StructHasOverlappingFields(typeFlags); if (overlappingFields) { return false; } // Don't struct promote if we have a CUSTOMLAYOUT flag on an HFA type if (StructHasCustomLayout(typeFlags) && compiler->IsHfa(typeHnd)) { return false; } #ifdef TARGET_ARM // On ARM, we have a requirement on the struct alignment; see below. unsigned structAlignment = roundUp(compHandle->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE); #endif // TARGET_ARM // If we have "Custom Layout" then we might have an explicit Size attribute. // Managed C++ uses this for its structs; such C++ types will not contain GC pointers. // // The current VM implementation also incorrectly sets the CORINFO_FLG_CUSTOMLAYOUT // whenever a managed value class contains any GC pointers. // (See the comment for VMFLAG_NOT_TIGHTLY_PACKED in class.h) // // It is important to struct promote managed value classes that have GC pointers. // So we compute the correct value for "CustomLayout" here. // if (StructHasCustomLayout(typeFlags) && ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0)) { structPromotionInfo.customLayout = true; } if (StructHasDontDigFieldsFlagSet(typeFlags)) { return CanConstructAndPromoteField(&structPromotionInfo); } unsigned fieldsSize = 0; for (BYTE ordinal = 0; ordinal < fieldCnt; ++ordinal) { CORINFO_FIELD_HANDLE fieldHnd = compHandle->getFieldInClass(typeHnd, ordinal); structPromotionInfo.fields[ordinal] = GetFieldInfo(fieldHnd, ordinal); const lvaStructFieldInfo& fieldInfo = structPromotionInfo.fields[ordinal]; noway_assert(fieldInfo.fldOffset < structSize); if (fieldInfo.fldSize == 0) { // Not a scalar type. return false; } if ((fieldInfo.fldOffset % fieldInfo.fldSize) != 0) { // The code in Compiler::genPushArgList that reconstitutes // struct values on the stack from promoted fields expects // those fields to be at their natural alignment. return false; } if (varTypeIsGC(fieldInfo.fldType)) { containsGCpointers = true; } // The end offset for this field should never be larger than our structSize.
noway_assert(fieldInfo.fldOffset + fieldInfo.fldSize <= structSize); fieldsSize += fieldInfo.fldSize; #ifdef TARGET_ARM // On ARM, for struct types that don't use explicit layout, the alignment of the struct is // at least the max alignment of its fields. We take advantage of this invariant in struct promotion, // so verify it here. if (fieldInfo.fldSize > structAlignment) { // Don't promote vars whose struct types violate the invariant. (Alignment == size for primitives.) return false; } #endif // TARGET_ARM } // If we saw any GC pointer or by-ref fields above then CORINFO_FLG_CONTAINS_GC_PTR or // CORINFO_FLG_BYREF_LIKE has to be set! noway_assert((containsGCpointers == false) || ((typeFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) != 0)); // Check if this promoted struct contains any holes. assert(!overlappingFields); if (fieldsSize != structSize) { // If the sizes do not match, it means we have overlapping fields or holes. // Overlapping fields were rejected early, so here it can mean only holes. structPromotionInfo.containsHoles = true; } // Cool, this struct is promotable. structPromotionInfo.canPromote = true; return true; } //-------------------------------------------------------------------------------------------- // CanConstructAndPromoteField - checks if we can construct field types without asking about them directly. // // Arguments: // structPromotionInfo - struct promotion candidate information. // // Return value: // true if we can figure out the fields from available knowledge. // // Notes: // This is needed for AOT R2R compilation when we can't cross compilation bubble borders, // so we should not ask about fields that are not directly referenced. If we do, the VM will have // to emit a type check for this field type, but it does not have enough information about it. // As a workaround for a performance-critical corner case (a struct with 1 gcref), we try to construct // the field information from indirect observations. // bool Compiler::StructPromotionHelper::CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo) { const CORINFO_CLASS_HANDLE typeHnd = structPromotionInfo->typeHnd; const COMP_HANDLE compHandle = compiler->info.compCompHnd; const DWORD typeFlags = compHandle->getClassAttribs(typeHnd); if (structPromotionInfo->fieldCnt != 1) { // Can't find out values for several fields. return false; } if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0) { // Can't find out type of a non-gc field. return false; } const unsigned structSize = compHandle->getClassSize(typeHnd); if (structSize != TARGET_POINTER_SIZE) { return false; } assert(!structPromotionInfo->containsHoles); assert(!structPromotionInfo->customLayout); lvaStructFieldInfo& fldInfo = structPromotionInfo->fields[0]; fldInfo.fldHnd = compHandle->getFieldInClass(typeHnd, 0); // We should not read it anymore. fldInfo.fldTypeHnd = 0; fldInfo.fldOffset = 0; fldInfo.fldOrdinal = 0; fldInfo.fldSize = TARGET_POINTER_SIZE; fldInfo.fldType = TYP_BYREF; structPromotionInfo->canPromote = true; return true; } //-------------------------------------------------------------------------------------------- // CanPromoteStructVar - checks if the struct can be promoted. // // Arguments: // lclNum - struct number to check. // // Return value: // true if the struct var can be promoted.
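// // Notes (a hedged summary): in addition to the type-level checks in CanPromoteStructType, // this rejects promotion for reasons specific to the particular local, such as use in a // SIMD intrinsic, parameters under -GS stack reordering, and unsupported multi-reg shapes.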
// bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varTypeIsStruct(varDsc)); assert(!varDsc->lvPromoted); // Don't ask again :) // If this lclVar is used in a SIMD intrinsic, then we don't want to struct promote it. // Note, however, that SIMD lclVars that are NOT used in a SIMD intrinsic may be // profitably promoted. if (varDsc->lvIsUsedInSIMDIntrinsic()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsUsedInSIMDIntrinsic()\n", lclNum); return false; } // Reject struct promotion of parameters when -GS stack reordering is enabled // as we could introduce shadow copies of them. if (varDsc->lvIsParam && compiler->compGSReorderStackLayout) { JITDUMP(" struct promotion of V%02u is disabled because lvIsParam and compGSReorderStackLayout\n", lclNum); return false; } if (!compiler->lvaEnregMultiRegVars && varDsc->lvIsMultiRegArgOrRet()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsMultiRegArgOrRet()\n", lclNum); return false; } CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != NO_CLASS_HANDLE); bool canPromote = CanPromoteStructType(typeHnd); if (canPromote && varDsc->lvIsMultiRegArgOrRet()) { unsigned fieldCnt = structPromotionInfo.fieldCnt; if (fieldCnt > MAX_MULTIREG_COUNT) { canPromote = false; } #if defined(TARGET_ARMARCH) else { for (unsigned i = 0; canPromote && (i < fieldCnt); i++) { var_types fieldType = structPromotionInfo.fields[i].fldType; // Non-HFA structs are always passed in general purpose registers. // If there are any floating point fields, don't promote for now. // Likewise, since HVA structs are passed in SIMD registers // promotion of non FP or SIMD type fields is disallowed. // TODO-1stClassStructs: add support in Lowering and prolog generation // to enable promoting these types. if (varDsc->lvIsParam && (varDsc->lvIsHfa() != varTypeUsesFloatReg(fieldType))) { canPromote = false; } #if defined(FEATURE_SIMD) // If we have a register-passed struct with mixed non-opaque SIMD types (i.e. with defined fields) // and non-SIMD types, we don't currently handle that case in the prolog, so we can't promote. else if ((fieldCnt > 1) && varTypeIsStruct(fieldType) && !compiler->isOpaqueSIMDType(structPromotionInfo.fields[i].fldTypeHnd)) { canPromote = false; } #endif // FEATURE_SIMD } } #elif defined(UNIX_AMD64_ABI) else { SortStructFields(); // Only promote if the field types match the registers, unless we have a single SIMD field. SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); unsigned regCount = structDesc.eightByteCount; if ((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)) { // Allow the case of promoting a single SIMD field, even if there are multiple registers. // We will fix this up in the prolog. } else if (structPromotionInfo.fieldCnt != regCount) { canPromote = false; } else { for (unsigned i = 0; canPromote && (i < regCount); i++) { lvaStructFieldInfo* fieldInfo = &(structPromotionInfo.fields[i]); var_types fieldType = fieldInfo->fldType; // We don't currently support passing SIMD types in registers. 
if (varTypeIsSIMD(fieldType)) { canPromote = false; } else if (varTypeUsesFloatReg(fieldType) != (structDesc.eightByteClassifications[i] == SystemVClassificationTypeSSE)) { canPromote = false; } } } } #endif // UNIX_AMD64_ABI } return canPromote; } //-------------------------------------------------------------------------------------------- // ShouldPromoteStructVar - Should a struct var be promoted if it can be promoted? // This routine mainly performs profitability checks. Right now it also has // some correctness checks due to limitations of downstream phases. // // Arguments: // lclNum - struct local number; // // Return value: // true if the struct should be promoted. // bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varTypeIsStruct(varDsc)); assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd); assert(structPromotionInfo.canPromote); bool shouldPromote = true; // We *can* promote; *should* we promote? // We should only do so if promotion has potential savings. One source of savings // is if a field of the struct is accessed, since this access will be turned into // an access of the corresponding promoted field variable. Even if there are no // field accesses, but only block-level operations on the whole struct, if the struct // has only one or two fields, then doing those block operations field-wise is probably faster // than doing a whole-variable block operation (e.g., a hardware "copy loop" on x86). // Struct promotion also provides the following benefits: reduce stack frame size, // reduce the need for zero init of stack frame and fine grained constant/copy prop. // Asm diffs indicate that promoting structs with up to 3 fields is a net size win. // So if no fields are accessed independently, and there are four or more fields, // then do not promote. // // TODO: Ideally we would want to consider the impact of whether the struct is // passed as a parameter or assigned the return value of a call, because once promoted, // struct copying is done by field-by-field assignment instead of a more efficient // rep.stos or xmm reg based copy. if (structPromotionInfo.fieldCnt > 3 && !varDsc->lvFieldAccessed) { JITDUMP("Not promoting promotable struct local V%02u: #fields = %d, fieldAccessed = %d.\n", lclNum, structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed); shouldPromote = false; } else if (varDsc->lvIsMultiRegRet && structPromotionInfo.containsHoles && structPromotionInfo.customLayout) { JITDUMP("Not promoting multi-reg returned struct local V%02u with holes.\n", lclNum); shouldPromote = false; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // TODO-PERF - Only do this when the LclVar is used in an argument context // TODO-ARM64 - HFA support should also eliminate the need for this. // TODO-ARM32 - HFA support should also eliminate the need for this.
// TODO-LSRA - Currently doesn't support the passing of floating point LCL_VARS in the integer registers // // For now we don't promote structs with a single float field. // Promoting it can cause us to shuffle it back and forth between the int and // the float regs when it is used as an argument, which is very expensive for XARCH // else if ((structPromotionInfo.fieldCnt == 1) && varTypeIsFloating(structPromotionInfo.fields[0].fldType)) { JITDUMP("Not promoting promotable struct local V%02u: #fields = %d because it is a struct with " "single float field.\n", lclNum, structPromotionInfo.fieldCnt); shouldPromote = false; } #endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM else if (varDsc->lvIsParam && !compiler->lvaIsImplicitByRefLocal(lclNum) && !varDsc->lvIsHfa()) { #if FEATURE_MULTIREG_STRUCT_PROMOTE // Is this a variable holding a value with exactly two fields passed in // multiple registers? if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs)) { if (structPromotionInfo.containsHoles && structPromotionInfo.customLayout) { JITDUMP("Not promoting multi-reg struct local V%02u with holes.\n", lclNum); shouldPromote = false; } else if ((structPromotionInfo.fieldCnt != 2) && !((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType))) { JITDUMP("Not promoting multireg struct local V%02u, because lvIsParam is true, #fields != 2 and it's " "not a single SIMD.\n", lclNum); shouldPromote = false; } } else #endif // FEATURE_MULTIREG_STRUCT_PROMOTE // TODO-PERF - Implement struct promotion for incoming single-register structs. // Also the implementation of jmp uses the 4 byte move to store // byte parameters to the stack, so that if we have a byte field // with something else occupying the same 4-byte slot, it will // overwrite other fields. if (structPromotionInfo.fieldCnt != 1) { JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = " "%d.\n", lclNum, structPromotionInfo.fieldCnt); shouldPromote = false; } } else if ((lclNum == compiler->genReturnLocal) && (structPromotionInfo.fieldCnt > 1)) { // TODO-1stClassStructs: a temporary solution to keep diffs small, it will be fixed later. shouldPromote = false; } #if defined(DEBUG) else if (compiler->compPromoteFewerStructs(lclNum)) { // Do not promote some structs that could be promoted, in order to stress promoted/unpromoted moves. JITDUMP("Not promoting promotable struct local V%02u, because of STRESS_PROMOTE_FEWER_STRUCTS\n", lclNum); shouldPromote = false; } #endif // // If the lvRefCnt is zero and we have a struct promoted parameter we can end up with an extra store of // the incoming register into the stack frame slot. // In that case, we would like to avoid promotion. // However, we haven't yet computed the lvRefCnt values so we can't do that. // CLANG_FORMAT_COMMENT_ANCHOR; return shouldPromote; } //-------------------------------------------------------------------------------------------- // SortStructFields - sort the fields in increasing order of field offset. // // Notes: // This is needed because the fields need to be pushed on stack (when referenced as a struct) in offset order.
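// // For example (a hedged illustration): fields reported in metadata order with offsets // { 8, 0, 4 } are reordered to { 0, 4, 8 }, so iterating fields[0..fieldCnt) walks the // struct from its lowest offset upward.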
// void Compiler::StructPromotionHelper::SortStructFields() { if (!structPromotionInfo.fieldsSorted) { jitstd::sort(structPromotionInfo.fields, structPromotionInfo.fields + structPromotionInfo.fieldCnt, lvaFieldOffsetCmp()); structPromotionInfo.fieldsSorted = true; } } //-------------------------------------------------------------------------------------------- // GetFieldInfo - get struct field information. // Arguments: // fieldHnd - field handle to get info for; // ordinal - field ordinal. // // Return value: // field information. // Compiler::lvaStructFieldInfo Compiler::StructPromotionHelper::GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal) { lvaStructFieldInfo fieldInfo; fieldInfo.fldHnd = fieldHnd; unsigned fldOffset = compiler->info.compCompHnd->getFieldOffset(fieldInfo.fldHnd); fieldInfo.fldOffset = (BYTE)fldOffset; fieldInfo.fldOrdinal = ordinal; CorInfoType corType = compiler->info.compCompHnd->getFieldType(fieldInfo.fldHnd, &fieldInfo.fldTypeHnd); fieldInfo.fldType = JITtype2varType(corType); fieldInfo.fldSize = genTypeSize(fieldInfo.fldType); #ifdef FEATURE_SIMD // Check to see if this is a SIMD type. // We will only check this if we have already found a SIMD type, which will be true if // we have encountered any SIMD intrinsics. if (compiler->usesSIMDTypes() && (fieldInfo.fldSize == 0) && compiler->isSIMDorHWSIMDClass(fieldInfo.fldTypeHnd)) { unsigned simdSize; CorInfoType simdBaseJitType = compiler->getBaseJitTypeAndSizeOfSIMDType(fieldInfo.fldTypeHnd, &simdSize); // We will only promote fields of SIMD types that fit into a SIMD register. if (simdBaseJitType != CORINFO_TYPE_UNDEF) { if ((simdSize >= compiler->minSIMDStructBytes()) && (simdSize <= compiler->maxSIMDStructBytes())) { fieldInfo.fldType = compiler->getSIMDTypeForSize(simdSize); fieldInfo.fldSize = simdSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG } } } #endif // FEATURE_SIMD if (fieldInfo.fldSize == 0) { TryPromoteStructField(fieldInfo); } return fieldInfo; } //-------------------------------------------------------------------------------------------- // TryPromoteStructField - checks that this struct's field is a struct that can be promoted as scalar type // aligned at its natural boundary. Promotes the field as a scalar if the check succeeded. // // Arguments: // fieldInfo - information about the field in the outer struct. // // Return value: // true if the internal struct was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructField(lvaStructFieldInfo& fieldInfo) { // Size of TYP_BLK, TYP_FUNC, TYP_VOID and TYP_STRUCT is zero. // Early out if field type is other than TYP_STRUCT. // This is a defensive check as we don't expect a struct to have // fields of TYP_BLK, TYP_FUNC or TYP_VOID. if (fieldInfo.fldType != TYP_STRUCT) { return false; } COMP_HANDLE compHandle = compiler->info.compCompHnd; // Do not promote if the struct field in turn has more than one field. if (compHandle->getClassNumInstanceFields(fieldInfo.fldTypeHnd) != 1) { return false; } // Do not promote if the single field is not aligned at its natural boundary within // the struct field. 
CORINFO_FIELD_HANDLE innerFieldHndl = compHandle->getFieldInClass(fieldInfo.fldTypeHnd, 0); unsigned innerFieldOffset = compHandle->getFieldOffset(innerFieldHndl); if (innerFieldOffset != 0) { return false; } CorInfoType fieldCorType = compHandle->getFieldType(innerFieldHndl); var_types fieldVarType = JITtype2varType(fieldCorType); unsigned fieldSize = genTypeSize(fieldVarType); // Do not promote if the field is not a primitive type, is floating-point, // or is not properly aligned. // // TODO-PERF: Structs containing a single floating-point field on Amd64 // need to be passed in integer registers. Right now LSRA doesn't support // passing of floating-point LCL_VARS in integer registers. Enabling promotion // of such structs results in an assert in lsra right now. // // TODO-CQ: Right now we only promote an actual SIMD typed field, which would cause // a nested SIMD type to fail promotion. if (fieldSize == 0 || fieldSize > TARGET_POINTER_SIZE || varTypeIsFloating(fieldVarType)) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but that field has invalid size or type.\n"); return false; } if (fieldSize != TARGET_POINTER_SIZE) { unsigned outerFieldOffset = compHandle->getFieldOffset(fieldInfo.fldHnd); if ((outerFieldOffset % fieldSize) != 0) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but the outer struct offset %u is not a multiple of the inner field size %u.\n", outerFieldOffset, fieldSize); return false; } } // Insist this wrapped field occupy all of its parent storage. unsigned innerStructSize = compHandle->getClassSize(fieldInfo.fldTypeHnd); if (fieldSize != innerStructSize) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but that field is not the same size as its parent.\n"); return false; } // Retype the field as the type of the single field of the struct. // This is a hack that allows us to promote such fields before we support recursive struct promotion // (tracked by #10019). fieldInfo.fldType = fieldVarType; fieldInfo.fldSize = fieldSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG return true; } //-------------------------------------------------------------------------------------------- // PromoteStructVar - promote struct variable. // // Arguments: // lclNum - struct local number; // void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); // We should never see a reg-sized non-field-addressed struct here. assert(!varDsc->lvRegStruct); assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd); assert(structPromotionInfo.canPromote); varDsc->lvFieldCnt = structPromotionInfo.fieldCnt; varDsc->lvFieldLclStart = compiler->lvaCount; varDsc->lvPromoted = true; varDsc->lvContainsHoles = structPromotionInfo.containsHoles; varDsc->lvCustomLayout = structPromotionInfo.customLayout; #ifdef DEBUG // Don't change the source to a TYP_BLK either. 
varDsc->lvKeepType = 1; #endif #ifdef DEBUG if (compiler->verbose) { printf("\nPromoting struct local V%02u (%s):", lclNum, compiler->eeGetClassName(varDsc->GetStructHnd())); } #endif SortStructFields(); for (unsigned index = 0; index < structPromotionInfo.fieldCnt; ++index) { const lvaStructFieldInfo* pFieldInfo = &structPromotionInfo.fields[index]; if (varTypeUsesFloatReg(pFieldInfo->fldType)) { // Whenever we promote a struct that contains a floating point field // it's possible we transition from a method that originally only had integer // local vars to start having FP. We have to communicate this through this flag // since LSRA later on will use this flag to determine whether or not to track FP register sets. compiler->compFloatingPointUsed = true; } // Now grab the temp for the field local. #ifdef DEBUG char buf[200]; sprintf_s(buf, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum, compiler->eeGetFieldName(pFieldInfo->fldHnd), pFieldInfo->fldOffset); // We need to copy 'buf' as lvaGrabTemp() below caches a copy to its argument. size_t len = strlen(buf) + 1; char* bufp = compiler->getAllocator(CMK_DebugOnly).allocate<char>(len); strcpy_s(bufp, len, buf); if (index > 0) { noway_assert(pFieldInfo->fldOffset > (pFieldInfo - 1)->fldOffset); } #endif // Lifetime of field locals might span multiple BBs, so they must be long lifetime temps. const unsigned varNum = compiler->lvaGrabTemp(false DEBUGARG(bufp)); // lvaGrabTemp can reallocate the lvaTable, so // refresh the cached varDsc for lclNum. varDsc = compiler->lvaGetDesc(lclNum); LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varNum); fieldVarDsc->lvType = pFieldInfo->fldType; fieldVarDsc->lvExactSize = pFieldInfo->fldSize; fieldVarDsc->lvIsStructField = true; fieldVarDsc->lvFieldHnd = pFieldInfo->fldHnd; fieldVarDsc->lvFldOffset = pFieldInfo->fldOffset; fieldVarDsc->lvFldOrdinal = pFieldInfo->fldOrdinal; fieldVarDsc->lvParentLcl = lclNum; fieldVarDsc->lvIsParam = varDsc->lvIsParam; // This new local may be the first time we've seen a long typed local. if (fieldVarDsc->lvType == TYP_LONG) { compiler->compLongUsed = true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Reset the implicitByRef flag. fieldVarDsc->lvIsImplicitByRef = 0; #endif // Do we have a parameter that can be enregistered? // if (varDsc->lvIsRegArg) { fieldVarDsc->lvIsRegArg = true; regNumber parentArgReg = varDsc->GetArgReg(); #if FEATURE_MULTIREG_ARGS if (!compiler->lvaIsImplicitByRefLocal(lclNum)) { #ifdef UNIX_AMD64_ABI if (varTypeIsSIMD(fieldVarDsc) && (varDsc->lvFieldCnt == 1)) { // This SIMD typed field may be passed in multiple registers. fieldVarDsc->SetArgReg(parentArgReg); fieldVarDsc->SetOtherArgReg(varDsc->GetOtherArgReg()); } else #endif // UNIX_AMD64_ABI { regNumber fieldRegNum; if (index == 0) { fieldRegNum = parentArgReg; } else if (varDsc->lvIsHfa()) { unsigned regIncrement = fieldVarDsc->lvFldOrdinal; #ifdef TARGET_ARM // TODO: Need to determine if/how to handle split args. if (varDsc->GetHfaType() == TYP_DOUBLE) { regIncrement *= 2; } #endif // TARGET_ARM fieldRegNum = (regNumber)(parentArgReg + regIncrement); } else { assert(index == 1); fieldRegNum = varDsc->GetOtherArgReg(); } fieldVarDsc->SetArgReg(fieldRegNum); } } else #endif // FEATURE_MULTIREG_ARGS && defined(FEATURE_SIMD) { fieldVarDsc->SetArgReg(parentArgReg); } } #ifdef FEATURE_SIMD if (varTypeIsSIMD(pFieldInfo->fldType)) { // Set size to zero so that lvaSetStruct will appropriately set the SIMD-relevant fields. 
fieldVarDsc->lvExactSize = 0; compiler->lvaSetStruct(varNum, pFieldInfo->fldTypeHnd, false, true); // We will not recursively promote this, so mark it as 'lvRegStruct' (note that we wouldn't // be promoting this if we didn't think it could be enregistered). fieldVarDsc->lvRegStruct = true; } #endif // FEATURE_SIMD #ifdef DEBUG // This temporary should not be converted to a double in stress mode, // because we introduce assigns to it after the stress conversion fieldVarDsc->lvKeepType = 1; #endif } } //-------------------------------------------------------------------------------------------- // lvaGetFieldLocal - returns the local var index for a promoted field in a promoted struct var. // // Arguments: // varDsc - the promoted struct var descriptor; // fldOffset - field offset in the struct. // // Return value: // the index of the local that represents this field. // unsigned Compiler::lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset) { noway_assert(varTypeIsStruct(varDsc)); noway_assert(varDsc->lvPromoted); for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { noway_assert(lvaTable[i].lvIsStructField); noway_assert(lvaTable[i].lvParentLcl == (unsigned)(varDsc - lvaTable)); if (lvaTable[i].lvFldOffset == fldOffset) { return i; } } // This is the not-found error return path; the caller should check for BAD_VAR_NUM return BAD_VAR_NUM; } /***************************************************************************** * * Set the local var "varNum" as address-exposed. * If this is a promoted struct, label its fields the same way. */ void Compiler::lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)) { LclVarDsc* varDsc = lvaGetDesc(varNum); varDsc->SetAddressExposed(true DEBUGARG(reason)); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { noway_assert(lvaTable[i].lvIsStructField); lvaTable[i].SetAddressExposed(true DEBUGARG(AddressExposedReason::PARENT_EXPOSED)); lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::AddrExposed)); } } lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::AddrExposed)); } //------------------------------------------------------------------------ // lvaSetVarLiveInOutOfHandler: Set the local varNum as being live in and/or out of a handler // // Arguments: // varNum - the varNum of the local // void Compiler::lvaSetVarLiveInOutOfHandler(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); varDsc->lvLiveInOutOfHndlr = 1; if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { noway_assert(lvaTable[i].lvIsStructField); lvaTable[i].lvLiveInOutOfHndlr = 1; // For now, only enregister an EH Var if it is a single def and its refCnt > 1. if (!lvaEnregEHVars || !lvaTable[i].lvSingleDefRegCandidate || lvaTable[i].lvRefCnt() <= 1) { lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } } } // For now, only enregister an EH Var if it is a single def and its refCnt > 1.
if (!lvaEnregEHVars || !varDsc->lvSingleDefRegCandidate || varDsc->lvRefCnt() <= 1) { lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } #ifdef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis() && (varNum == info.compThisArg)) { // For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer // in the same register for the entire method, or keep it on the stack. If it is EH-exposed, we can't ever // keep it in a register, since it must also be live on the stack. Therefore, we won't attempt to allocate it. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } #endif // JIT32_GCENCODER } /***************************************************************************** * * Record that the local var "varNum" should not be enregistered (for one of several reasons.) */ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)) { LclVarDsc* varDsc = lvaGetDesc(varNum); const bool wasAlreadyMarkedDoNotEnreg = (varDsc->lvDoNotEnregister == 1); varDsc->lvDoNotEnregister = 1; #ifdef DEBUG if (!wasAlreadyMarkedDoNotEnreg) { varDsc->SetDoNotEnregReason(reason); } if (verbose) { printf("\nLocal V%02u should not be enregistered because: ", varNum); } switch (reason) { case DoNotEnregisterReason::AddrExposed: JITDUMP("it is address exposed\n"); assert(varDsc->IsAddressExposed()); break; case DoNotEnregisterReason::DontEnregStructs: JITDUMP("struct enregistration is disabled\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::NotRegSizeStruct: JITDUMP("struct size does not match reg size\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::LocalField: JITDUMP("was accessed as a local field\n"); break; case DoNotEnregisterReason::VMNeedsStackAddr: JITDUMP("VM needs stack addr\n"); break; case DoNotEnregisterReason::LiveInOutOfHandler: JITDUMP("live in/out of a handler\n"); varDsc->lvLiveInOutOfHndlr = 1; break; case DoNotEnregisterReason::BlockOp: JITDUMP("written/read in a block op\n"); break; case DoNotEnregisterReason::IsStructArg: if (varTypeIsStruct(varDsc)) { JITDUMP("it is a struct arg\n"); } else { JITDUMP("it is reinterpreted as a struct arg\n"); } break; case DoNotEnregisterReason::DepField: JITDUMP("field of a dependently promoted struct\n"); assert(varDsc->lvIsStructField && (lvaGetParentPromotionType(varNum) != PROMOTION_TYPE_INDEPENDENT)); break; case DoNotEnregisterReason::NoRegVars: JITDUMP("opts.compFlags & CLFLG_REGVAR is not set\n"); assert(!compEnregLocals()); break; case DoNotEnregisterReason::MinOptsGC: JITDUMP("it is a GC Ref and we are compiling MinOpts\n"); assert(!JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())); break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: JITDUMP("it is a decomposed field of a long parameter\n"); break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: JITDUMP("pinning ref\n"); assert(varDsc->lvPinned); break; #endif case DoNotEnregisterReason::LclAddrNode: JITDUMP("LclAddrVar/Fld takes the address of this node\n"); break; case DoNotEnregisterReason::CastTakesAddr: JITDUMP("cast takes addr\n"); break; case DoNotEnregisterReason::StoreBlkSrc: JITDUMP("the local is used as store block src\n"); break; case DoNotEnregisterReason::OneAsgRetyping: JITDUMP("OneAsg forbids enreg\n"); break; case DoNotEnregisterReason::SwizzleArg: JITDUMP("SwizzleArg\n"); break; case DoNotEnregisterReason::BlockOpRet: 
JITDUMP("return uses a block op\n"); break; case DoNotEnregisterReason::ReturnSpCheck: JITDUMP("Used for SP check\n"); break; case DoNotEnregisterReason::SimdUserForcesDep: JITDUMP("Promoted struct used by a SIMD/HWI node\n"); break; default: unreached(); break; } #endif } // Returns true if this local var is a multireg struct. // TODO-Throughput: This does a lookup on the class handle, and in the outgoing arg context // this information is already available on the fgArgTabEntry, and shouldn't need to be // recomputed. // bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVarArg) { if (varTypeIsStruct(varDsc->TypeGet())) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); structPassingKind howToPassStruct; var_types type = getArgTypeForStruct(clsHnd, &howToPassStruct, isVarArg, varDsc->lvExactSize); if (howToPassStruct == SPK_ByValueAsHfa) { assert(type == TYP_STRUCT); return true; } #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) if (howToPassStruct == SPK_ByValue) { assert(type == TYP_STRUCT); return true; } #endif } return false; } /***************************************************************************** * Set the lvClass for a local variable of a struct type */ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo) { LclVarDsc* varDsc = lvaGetDesc(varNum); if (setTypeInfo) { varDsc->lvVerTypeInfo = typeInfo(TI_STRUCT, typeHnd); } // Set the type and associated info if we haven't already set it. if (varDsc->lvType == TYP_UNDEF) { varDsc->lvType = TYP_STRUCT; } if (varDsc->GetLayout() == nullptr) { ClassLayout* layout = typGetObjLayout(typeHnd); varDsc->SetLayout(layout); assert(varDsc->lvExactSize == 0); varDsc->lvExactSize = layout->GetSize(); assert(varDsc->lvExactSize != 0); if (layout->IsValueClass()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; varDsc->lvType = impNormStructType(typeHnd, &simdBaseJitType); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Mark implicit byref struct parameters if (varDsc->lvIsParam && !varDsc->lvIsStructField) { structPassingKind howToReturnStruct; getArgTypeForStruct(typeHnd, &howToReturnStruct, this->info.compIsVarArgs, varDsc->lvExactSize); if (howToReturnStruct == SPK_ByReference) { JITDUMP("Marking V%02i as a byref parameter\n", varNum); varDsc->lvIsImplicitByRef = 1; } } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #if FEATURE_SIMD if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(varDsc)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); } #endif // FEATURE_SIMD if (GlobalJitOptions::compFeatureHfa) { // For structs that are small enough, we check and set HFA element type if (varDsc->lvExactSize <= MAX_PASS_MULTIREG_BYTES) { // hfaType is set to float, double or SIMD type if it is an HFA, otherwise TYP_UNDEF var_types hfaType = GetHfaType(typeHnd); if (varTypeIsValidHfaType(hfaType)) { varDsc->SetHfaType(hfaType); // hfa variables can never contain GC pointers assert(!layout->HasGCPtr()); // The size of this struct should be evenly divisible by 4 or 8 assert((varDsc->lvExactSize % genTypeSize(hfaType)) == 0); // The number of elements in the HFA should fit into our MAX_ARG_REG_COUNT limit assert((varDsc->lvExactSize / genTypeSize(hfaType)) <= MAX_ARG_REG_COUNT); } } } } } else { #if FEATURE_SIMD assert(!varTypeIsSIMD(varDsc) || (varDsc->GetSimdBaseType() != TYP_UNKNOWN)); #endif // FEATURE_SIMD ClassLayout* layout = typGetObjLayout(typeHnd); assert(ClassLayout::AreCompatible(varDsc->GetLayout(), 
layout)); // Inlining could replace a canon struct type with an exact one. varDsc->SetLayout(layout); assert(varDsc->lvExactSize != 0); } #ifndef TARGET_64BIT bool fDoubleAlignHint = false; #ifdef TARGET_X86 fDoubleAlignHint = true; #endif if (info.compCompHnd->getClassAlignmentRequirement(typeHnd, fDoubleAlignHint) == 8) { #ifdef DEBUG if (verbose) { printf("Marking struct in V%02i with double align flag\n", varNum); } #endif varDsc->lvStructDoubleAlign = 1; } #endif // not TARGET_64BIT unsigned classAttribs = info.compCompHnd->getClassAttribs(typeHnd); varDsc->lvOverlappingFields = StructHasOverlappingFields(classAttribs); // Check whether this local is an unsafe value type and requires GS cookie protection. // GS checks require the stack to be re-ordered, which can't be done with EnC. if (unsafeValueClsCheck && (classAttribs & CORINFO_FLG_UNSAFE_VALUECLASS) && !opts.compDbgEnC) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; varDsc->lvIsUnsafeBuffer = true; } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { makeExtraStructQueries(typeHnd, 2); } #endif // DEBUG } #ifdef DEBUG //------------------------------------------------------------------------ // makeExtraStructQueries: Query the information for the given struct handle. // // Arguments: // structHandle -- The handle for the struct type we're querying. // level -- How many more levels to recurse. // void Compiler::makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level) { if (level <= 0) { return; } assert(structHandle != NO_CLASS_HANDLE); (void)typGetObjLayout(structHandle); DWORD typeFlags = info.compCompHnd->getClassAttribs(structHandle); if (StructHasDontDigFieldsFlagSet(typeFlags)) { // In AOT ReadyToRun compilation, don't query fields of types // outside of the current version bubble. return; } unsigned fieldCnt = info.compCompHnd->getClassNumInstanceFields(structHandle); impNormStructType(structHandle); #ifdef TARGET_ARMARCH GetHfaType(structHandle); #endif for (unsigned int i = 0; i < fieldCnt; i++) { CORINFO_FIELD_HANDLE fieldHandle = info.compCompHnd->getFieldInClass(structHandle, i); unsigned fldOffset = info.compCompHnd->getFieldOffset(fieldHandle); CORINFO_CLASS_HANDLE fieldClassHandle = NO_CLASS_HANDLE; CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHandle, &fieldClassHandle); var_types fieldVarType = JITtype2varType(fieldCorType); if (fieldClassHandle != NO_CLASS_HANDLE) { if (varTypeIsStruct(fieldVarType)) { makeExtraStructQueries(fieldClassHandle, level - 1); } } } } #endif // DEBUG //------------------------------------------------------------------------ // lvaSetStructUsedAsVarArg: update hfa information for vararg struct args // // Arguments: // varNum -- number of the variable // // Notes: // This only affects arm64 varargs on windows where we need to pass // hfa arguments as if they are not HFAs. // // This function should only be called if the struct is used in a varargs // method. void Compiler::lvaSetStructUsedAsVarArg(unsigned varNum) { if (GlobalJitOptions::compFeatureHfa && TargetOS::IsWindows) { #if defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); // For varargs methods incoming and outgoing arguments should not be treated // as HFA. varDsc->SetHfaType(TYP_UNDEF); #endif // defined(TARGET_ARM64) } } //------------------------------------------------------------------------ // lvaSetClass: set class information for a local var. 
// // Arguments: // varNum -- number of the variable // clsHnd -- class handle to use in set or update // isExact -- true if class is known exactly // // Notes: // varNum must not already have a ref class handle. void Compiler::lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact) { noway_assert(varNum < lvaCount); // If we are just importing, we cannot reliably track local ref types, // since the jit maps CORINFO_TYPE_VAR to TYP_REF. if (compIsForImportOnly()) { return; } // Else we should have a type handle. assert(clsHnd != nullptr); LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvType == TYP_REF); // We should not have any ref type information for this var. assert(varDsc->lvClassHnd == NO_CLASS_HANDLE); assert(!varDsc->lvClassIsExact); JITDUMP("\nlvaSetClass: setting class for V%02i to (%p) %s %s\n", varNum, dspPtr(clsHnd), info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : ""); varDsc->lvClassHnd = clsHnd; varDsc->lvClassIsExact = isExact; } //------------------------------------------------------------------------ // lvaSetClass: set class information for a local var from a tree or stack type // // Arguments: // varNum -- number of the variable. Must be a single def local // tree -- tree establishing the variable's value // stackHnd -- handle for the type from the evaluation stack // // Notes: // Preferentially uses the tree's type, when available. Since not all // tree kinds can track ref types, the stack type is used as a // fallback. If there is no stack type, then the class is set to object. void Compiler::lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull); if (clsHnd != nullptr) { lvaSetClass(varNum, clsHnd, isExact); } else if (stackHnd != nullptr) { lvaSetClass(varNum, stackHnd); } else { lvaSetClass(varNum, impGetObjectClass()); } } //------------------------------------------------------------------------ // lvaUpdateClass: update class information for a local var. // // Arguments: // varNum -- number of the variable // clsHnd -- class handle to use in set or update // isExact -- true if class is known exactly // // Notes: // // This method models the type update rule for an assignment. // // Updates currently should only happen for single-def user args or // locals, when we are processing the expression actually being // used to initialize the local (or inlined arg). The update will // change the local from the declared type to the type of the // initial value. // // These updates should always *improve* what we know about the // type, that is, making an inexact type exact, or changing a type // to some subtype. However, the jit lacks precise type information // for shared code, so ensuring this is currently not // possible. void Compiler::lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact) { assert(varNum < lvaCount); // If we are just importing, we cannot reliably track local ref types, // since the jit maps CORINFO_TYPE_VAR to TYP_REF. if (compIsForImportOnly()) { return; } // Else we should have a class handle to consider assert(clsHnd != nullptr); LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvType == TYP_REF); // We should already have a class assert(varDsc->lvClassHnd != NO_CLASS_HANDLE); // We should only be updating classes for single-def locals. assert(varDsc->lvSingleDef); // Now see if we should update.
// // New information may not always be "better" so do some // simple analysis to decide if the update is worthwhile. const bool isNewClass = (clsHnd != varDsc->lvClassHnd); bool shouldUpdate = false; // Are we attempting to update the class? Only check this when we have // a new type and the existing class is inexact... we should not be // updating exact classes. if (!varDsc->lvClassIsExact && isNewClass) { shouldUpdate = !!info.compCompHnd->isMoreSpecificType(varDsc->lvClassHnd, clsHnd); } // Else are we attempting to update exactness? else if (isExact && !varDsc->lvClassIsExact && !isNewClass) { shouldUpdate = true; } #if DEBUG if (isNewClass || (isExact != varDsc->lvClassIsExact)) { JITDUMP("\nlvaUpdateClass:%s Updating class for V%02u", shouldUpdate ? "" : " NOT", varNum); JITDUMP(" from (%p) %s%s", dspPtr(varDsc->lvClassHnd), info.compCompHnd->getClassName(varDsc->lvClassHnd), varDsc->lvClassIsExact ? " [exact]" : ""); JITDUMP(" to (%p) %s%s\n", dspPtr(clsHnd), info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : ""); } #endif // DEBUG if (shouldUpdate) { varDsc->lvClassHnd = clsHnd; varDsc->lvClassIsExact = isExact; #if DEBUG // Note we've modified the type... varDsc->lvClassInfoUpdated = true; #endif // DEBUG } return; } //------------------------------------------------------------------------ // lvaUpdateClass: Update class information for a local var from a tree // or stack type // // Arguments: // varNum -- number of the variable. Must be a single def local // tree -- tree establishing the variable's value // stackHnd -- handle for the type from the evaluation stack // // Notes: // Preferentially uses the tree's type, when available. Since not all // tree kinds can track ref types, the stack type is used as a // fallback. void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull); if (clsHnd != nullptr) { lvaUpdateClass(varNum, clsHnd, isExact); } else if (stackHnd != nullptr) { lvaUpdateClass(varNum, stackHnd); } } //------------------------------------------------------------------------ // lvaLclSize: returns the size of a local variable, in bytes // // Arguments: // varNum -- variable to query // // Returns: // Number of bytes needed on the frame for such a local. unsigned Compiler::lvaLclSize(unsigned varNum) { assert(varNum < lvaCount); var_types varType = lvaTable[varNum].TypeGet(); switch (varType) { case TYP_STRUCT: case TYP_BLK: return lvaTable[varNum].lvSize(); case TYP_LCLBLK: #if FEATURE_FIXED_OUT_ARGS // Note that this operation performs a read of a PhasedVar noway_assert(varNum == lvaOutgoingArgSpaceVar); return lvaOutgoingArgSpaceSize; #else // FEATURE_FIXED_OUT_ARGS assert(!"Unknown size"); NO_WAY("Target doesn't support TYP_LCLBLK"); #endif // FEATURE_FIXED_OUT_ARGS default: // This must be a primitive var. Fall out of switch statement break; } #ifdef TARGET_64BIT // We only need this Quirk for TARGET_64BIT if (lvaTable[varNum].lvQuirkToLong) { noway_assert(lvaTable[varNum].IsAddressExposed()); return genTypeStSz(TYP_LONG) * sizeof(int); // return 8 (2 * 4) } #endif return genTypeStSz(varType) * sizeof(int); } // // Return the exact width of local variable "varNum" -- the number of bytes // you'd need to copy in order to overwrite the value.
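// // For example (hedged): a 6-byte struct local reports lvaLclExactSize == 6, while // lvaLclSize above rounds its frame storage up to TARGET_POINTER_SIZE (8 bytes on // 64-bit targets).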
// unsigned Compiler::lvaLclExactSize(unsigned varNum) { assert(varNum < lvaCount); var_types varType = lvaTable[varNum].TypeGet(); switch (varType) { case TYP_STRUCT: case TYP_BLK: return lvaTable[varNum].lvExactSize; case TYP_LCLBLK: #if FEATURE_FIXED_OUT_ARGS // Note that this operation performs a read of a PhasedVar noway_assert(lvaOutgoingArgSpaceSize >= 0); noway_assert(varNum == lvaOutgoingArgSpaceVar); return lvaOutgoingArgSpaceSize; #else // FEATURE_FIXED_OUT_ARGS assert(!"Unknown size"); NO_WAY("Target doesn't support TYP_LCLBLK"); #endif // FEATURE_FIXED_OUT_ARGS default: // This must be a primitive var. Fall out of switch statement break; } return genTypeSize(varType); } // getCalledCount -- get the value used to normalize weights for this method. // If we don't have profile data, getCalledCount returns BB_UNITY_WEIGHT (100); // otherwise it returns the number of times that profile data says the method was called. // // static weight_t BasicBlock::getCalledCount(Compiler* comp) { // when we don't have profile data, fgCalledCount will be BB_UNITY_WEIGHT (100) weight_t calledCount = comp->fgCalledCount; // If we haven't yet reached the place where we set up fgCalledCount, it could still be zero, // so return a reasonable value to use until we set it. // if (calledCount == 0) { if (comp->fgIsUsingProfileWeights()) { // When we use profile data block counts we have exact counts, // not multiples of BB_UNITY_WEIGHT (100) calledCount = 1; } else { calledCount = comp->fgFirstBB->bbWeight; if (calledCount == 0) { calledCount = BB_UNITY_WEIGHT; } } } return calledCount; } // getBBWeight -- get the normalized weight of this block weight_t BasicBlock::getBBWeight(Compiler* comp) { if (this->bbWeight == BB_ZERO_WEIGHT) { return BB_ZERO_WEIGHT; } else { weight_t calledCount = getCalledCount(comp); // Normalize the bbWeights by multiplying by BB_UNITY_WEIGHT and dividing by the calledCount. // weight_t fullResult = this->bbWeight * BB_UNITY_WEIGHT / calledCount; return fullResult; } } // LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for small code. class LclVarDsc_SmallCode_Less { const LclVarDsc* m_lvaTable; INDEBUG(unsigned m_lvaCount;) public: LclVarDsc_SmallCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount)) : m_lvaTable(lvaTable) #ifdef DEBUG , m_lvaCount(lvaCount) #endif { } bool operator()(unsigned n1, unsigned n2) { assert(n1 < m_lvaCount); assert(n2 < m_lvaCount); const LclVarDsc* dsc1 = &m_lvaTable[n1]; const LclVarDsc* dsc2 = &m_lvaTable[n2]; // We should not be sorting untracked variables assert(dsc1->lvTracked); assert(dsc2->lvTracked); // We should not be sorting after registers have been allocated assert(!dsc1->lvRegister); assert(!dsc2->lvRegister); unsigned weight1 = dsc1->lvRefCnt(); unsigned weight2 = dsc2->lvRefCnt(); #ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a leftover from // the legacy backend. It should be enabled and verified. // Force integer candidates to sort above float candidates. const bool isFloat1 = isFloatRegType(dsc1->lvType); const bool isFloat2 = isFloatRegType(dsc2->lvType); if (isFloat1 != isFloat2) { if ((weight2 != 0) && isFloat1) { return false; } if ((weight1 != 0) && isFloat2) { return true; } } #endif if (weight1 != weight2) { return weight1 > weight2; } // If the weighted ref counts are different, use them to break the tie.
if (dsc1->lvRefCntWtd() != dsc2->lvRefCntWtd()) { return dsc1->lvRefCntWtd() > dsc2->lvRefCntWtd(); } // We have equal ref counts and weighted ref counts. // Break the tie by: // - Increasing the weight by 2 if we are a register arg. // - Increasing the weight by 0.5 if we are a GC type. // // Review: seems odd that this is mixing counts and weights. if (weight1 != 0) { if (dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc1->TypeGet())) { weight1 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight2 != 0) { if (dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc2->TypeGet())) { weight2 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight1 != weight2) { return weight1 > weight2; } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; // LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for blended code. class LclVarDsc_BlendedCode_Less { const LclVarDsc* m_lvaTable; INDEBUG(unsigned m_lvaCount;) public: LclVarDsc_BlendedCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount)) : m_lvaTable(lvaTable) #ifdef DEBUG , m_lvaCount(lvaCount) #endif { } bool operator()(unsigned n1, unsigned n2) { assert(n1 < m_lvaCount); assert(n2 < m_lvaCount); const LclVarDsc* dsc1 = &m_lvaTable[n1]; const LclVarDsc* dsc2 = &m_lvaTable[n2]; // We should not be sorting untracked variables assert(dsc1->lvTracked); assert(dsc2->lvTracked); // We should not be sorting after registers have been allocated assert(!dsc1->lvRegister); assert(!dsc2->lvRegister); weight_t weight1 = dsc1->lvRefCntWtd(); weight_t weight2 = dsc2->lvRefCntWtd(); #ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. // Force integer candidates to sort above float candidates. const bool isFloat1 = isFloatRegType(dsc1->lvType); const bool isFloat2 = isFloatRegType(dsc2->lvType); if (isFloat1 != isFloat2) { if (!Compiler::fgProfileWeightsEqual(weight2, 0) && isFloat1) { return false; } if (!Compiler::fgProfileWeightsEqual(weight1, 0) && isFloat2) { return true; } } #endif if (!Compiler::fgProfileWeightsEqual(weight1, 0) && dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight2, 0) && dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight1, weight2)) { return weight1 > weight2; } // If the weighted ref counts are different then try the unweighted ref counts. if (dsc1->lvRefCnt() != dsc2->lvRefCnt()) { return dsc1->lvRefCnt() > dsc2->lvRefCnt(); } // If one is a GC type and the other is not the GC type wins. if (varTypeIsGC(dsc1->TypeGet()) != varTypeIsGC(dsc2->TypeGet())) { return varTypeIsGC(dsc1->TypeGet()); } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; /***************************************************************************** * * Sort the local variable table by refcount and assign tracking indices. 
*/ void Compiler::lvaSortByRefCount() { lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; #ifdef DEBUG VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeEmpty(this)); #endif if (lvaCount == 0) { return; } /* We'll sort the variables by ref count - allocate the sorted table */ if (lvaTrackedToVarNumSize < lvaCount) { lvaTrackedToVarNumSize = lvaCount; lvaTrackedToVarNum = new (getAllocator(CMK_LvaTable)) unsigned[lvaTrackedToVarNumSize]; } unsigned trackedCount = 0; unsigned* tracked = lvaTrackedToVarNum; // Fill in the table used for sorting for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); // Start by assuming that the variable will be tracked. varDsc->lvTracked = 1; if (varDsc->lvRefCnt() == 0) { // Zero ref count, make this untracked. varDsc->lvTracked = 0; varDsc->setLvRefCntWtd(0); } #if !defined(TARGET_64BIT) if (varTypeIsLong(varDsc) && varDsc->lvPromoted) { varDsc->lvTracked = 0; } #endif // !defined(TARGET_64BIT) // Variables that are address-exposed, and all struct locals, are never enregistered or tracked. // (The struct may be promoted, and its field variables enregistered/tracked, or the VM may "normalize" // its type so that it's not seen by the JIT as a struct.) // Pinned variables may not be tracked (a condition of the GCInfo representation) // or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning") // references when using the general GC encoding. if (varDsc->IsAddressExposed()) { varDsc->lvTracked = 0; assert(varDsc->lvType != TYP_STRUCT || varDsc->lvDoNotEnregister); // For structs, should have set this when we set m_addrExposed. } if (varTypeIsStruct(varDsc)) { // Promoted structs will never be considered for enregistration anyway, // and the DoNotEnregister flag was used to indicate whether promotion was // independent or dependent. if (varDsc->lvPromoted) { varDsc->lvTracked = 0; } else if (!varDsc->IsEnregisterableType()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NotRegSizeStruct)); } else if (varDsc->lvType == TYP_STRUCT) { if (!varDsc->lvRegStruct && !compEnregStructLocals()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DontEnregStructs)); } else if (varDsc->lvIsMultiRegArgOrRet()) { // Prolog and return generators do not support SIMD<->general register moves. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); } #if defined(TARGET_ARM) else if (varDsc->lvIsParam) { // On arm we prespill all struct args, // TODO-Arm-CQ: keep them in registers, it will need a fix // to "On the ARM we will spill any incoming struct args" logic in codegencommon.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); } #endif // TARGET_ARM } } if (varDsc->lvIsStructField && (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT)) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DepField)); } if (varDsc->lvPinned) { varDsc->lvTracked = 0; #ifdef JIT32_GCENCODER lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif } if (opts.MinOpts() && !JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())) { varDsc->lvTracked = 0; lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::MinOptsGC)); } if (!compEnregLocals()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } #if defined(JIT32_GCENCODER) && defined(FEATURE_EH_FUNCLETS) if (lvaIsOriginalThisArg(lclNum) && (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) { // For x86/Linux, we need to track "this". // However we cannot have it in tracked variables, so we set "this" pointer always untracked varDsc->lvTracked = 0; } #endif // Are we not optimizing and we have exception handlers? // if so mark all args and locals "do not enregister". // if (opts.MinOpts() && compHndBBtabCount > 0) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } else { var_types type = genActualType(varDsc->TypeGet()); switch (type) { case TYP_FLOAT: case TYP_DOUBLE: case TYP_INT: case TYP_LONG: case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD case TYP_STRUCT: break; case TYP_UNDEF: case TYP_UNKNOWN: noway_assert(!"lvType not set correctly"); varDsc->lvType = TYP_INT; FALLTHROUGH; default: varDsc->lvTracked = 0; } } if (varDsc->lvTracked) { tracked[trackedCount++] = lclNum; } } // Now sort the tracked variable table by ref-count if (compCodeOpt() == SMALL_CODE) { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_SmallCode_Less(lvaTable DEBUGARG(lvaCount))); } else { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_BlendedCode_Less(lvaTable DEBUGARG(lvaCount))); } lvaTrackedCount = min((unsigned)JitConfig.JitMaxLocalsToTrack(), trackedCount); JITDUMP("Tracked variable (%u out of %u) table:\n", lvaTrackedCount, lvaCount); // Assign indices to all the variables we've decided to track for (unsigned varIndex = 0; varIndex < lvaTrackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvVarIndex = static_cast<unsigned short>(varIndex); INDEBUG(if (verbose) { gtDispLclVar(tracked[varIndex]); }) JITDUMP(" [%6s]: refCnt = %4u, refCntWtd = %6s\n", varTypeName(varDsc->TypeGet()), varDsc->lvRefCnt(), refCntWtd2str(varDsc->lvRefCntWtd())); } JITDUMP("\n"); // Mark all variables past the first 'lclMAX_TRACKED' as untracked for (unsigned varIndex = lvaTrackedCount; varIndex < trackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvTracked = 0; } // We have a new epoch, and also cache the tracked var count in terms of size_t's sufficient to hold that many bits. 
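// Note: bumping lvaCurEpoch below marks any previously built VARSET_TP live sets as stale;
// they must not be mixed with sets built under the new tracked-variable numbering.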
    lvaCurEpoch++;
    lvaTrackedCountInSizeTUnits =
        roundUp((unsigned)lvaTrackedCount, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);

#ifdef DEBUG
    VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeFull(this));
#endif
}

/*****************************************************************************
 *
 *  This is called by lvaMarkLclRefs to disqualify a variable from being
 *  considered by optAddCopies()
 */
void LclVarDsc::lvaDisqualifyVar()
{
    this->lvDisqualify = true;
    this->lvSingleDef  = false;
    this->lvDefStmt    = nullptr;
}

#ifdef FEATURE_SIMD
var_types LclVarDsc::GetSimdBaseType() const
{
    CorInfoType simdBaseJitType = GetSimdBaseJitType();

    if (simdBaseJitType == CORINFO_TYPE_UNDEF)
    {
        return TYP_UNKNOWN;
    }
    return JitType2PreciseVarType(simdBaseJitType);
}
#endif // FEATURE_SIMD

unsigned LclVarDsc::lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK.
{
    // TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted,
    // where the struct itself is no longer used because all access is via its member fields.
    // When that happens, the struct is marked as unused and its type has been changed to
    // TYP_INT (to keep the GC tracking code from looking at it).
    // See Compiler::raAssignVars() for details. For example:
    //      N002 (  4,  3) [00EA067C] -------------    return    struct $346
    //      N001 (  3,  2) [00EA0628] -------------       lclVar    struct(U) V03 loc2
    //                                                    float  V03.f1 (offs=0x00) -> V12 tmp7
    //                                                    f8 (last use) (last use) $345
    // Here, the "struct(U)" shows that the "V03 loc2" variable is unused. Not shown is that V03
    // is now TYP_INT in the local variable table. It's not really unused, because it's in the tree.

    assert(varTypeIsStruct(lvType) || (lvType == TYP_BLK) || (lvPromoted && lvUnusedStruct));

    if (lvIsParam)
    {
        assert(varTypeIsStruct(lvType));
        const bool     isFloatHfa       = (lvIsHfa() && (GetHfaType() == TYP_FLOAT));
        const unsigned argSizeAlignment = Compiler::eeGetArgSizeAlignment(lvType, isFloatHfa);
        return roundUp(lvExactSize, argSizeAlignment);
    }

#if defined(FEATURE_SIMD) && !defined(TARGET_64BIT)
    // For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. We can't do
    // this for arguments, which must be passed according to the defined ABI. We don't want to do this for
    // dependently promoted struct fields, but we don't know that here. See lvaMapSimd12ToSimd16().
    // (Note that for 64-bits, we are already rounding up to 16.)
    if (lvType == TYP_SIMD12)
    {
        assert(!lvIsParam);
        assert(lvExactSize == 12);
        return 16;
    }
#endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT)

    return roundUp(lvExactSize, TARGET_POINTER_SIZE);
}

/**********************************************************************************
 * Get stack size of the varDsc.
 */
size_t LclVarDsc::lvArgStackSize() const
{
    // Make sure this will have a stack size
    assert(!this->lvIsRegArg);

    size_t stackSize = 0;
    if (varTypeIsStruct(this))
    {
#if defined(WINDOWS_AMD64_ABI)
        // Structs are either passed by reference or can be passed by value using one pointer
        stackSize = TARGET_POINTER_SIZE;
#elif defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
        // lvSize performs a roundup.
        stackSize = this->lvSize();

#if defined(TARGET_ARM64)
        if ((stackSize > TARGET_POINTER_SIZE * 2) && (!this->lvIsHfa()))
        {
            // If the size is greater than 16 bytes then it will
            // be passed by reference.
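            // (TARGET_POINTER_SIZE * 2 is 16 bytes on ARM64, matching the ABI limit for
            // passing a non-HFA struct by value.)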
stackSize = TARGET_POINTER_SIZE; } #endif // defined(TARGET_ARM64) #else // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI NYI("Unsupported target."); unreached(); #endif // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI } else { stackSize = TARGET_POINTER_SIZE; } return stackSize; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Arguments: // tree - node that uses the local, its type is checked first. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType(const GenTreeLclVarCommon* tree) const { var_types targetType = tree->gtType; var_types lclVarType = TypeGet(); if (targetType == TYP_STRUCT) { if (lclVarType == TYP_STRUCT) { assert(!tree->OperIsLocalField() && "do not expect struct local fields."); lclVarType = GetLayout()->GetRegisterType(); } targetType = lclVarType; } #ifdef DEBUG if ((targetType != TYP_UNDEF) && tree->OperIs(GT_STORE_LCL_VAR) && lvNormalizeOnStore()) { const bool phiStore = (tree->gtGetOp1()->OperIsNonPhiLocal() == false); // Ensure that the lclVar node is typed correctly, // does not apply to phi-stores because they do not produce code in the merge block. assert(phiStore || targetType == genActualType(lclVarType)); } #endif return targetType; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType() const { if (TypeGet() != TYP_STRUCT) { #if !defined(TARGET_64BIT) if (TypeGet() == TYP_LONG) { return TYP_UNDEF; } #endif return TypeGet(); } assert(m_layout != nullptr); return m_layout->GetRegisterType(); } //------------------------------------------------------------------------ // GetActualRegisterType: Determine an actual register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetActualRegisterType() const { return genActualType(GetRegisterType()); } //---------------------------------------------------------------------------------------------- // CanBeReplacedWithItsField: check if a whole struct reference could be replaced by a field. // // Arguments: // comp - the compiler instance; // // Return Value: // true if that can be replaced, false otherwise. // // Notes: // The replacement can be made only for independently promoted structs // with 1 field without holes. // bool LclVarDsc::CanBeReplacedWithItsField(Compiler* comp) const { if (!lvPromoted) { return false; } if (comp->lvaGetPromotionType(this) != Compiler::PROMOTION_TYPE_INDEPENDENT) { return false; } if (lvFieldCnt != 1) { return false; } if (lvContainsHoles) { return false; } #if defined(FEATURE_SIMD) // If we return `struct A { SIMD16 a; }` we split the struct into several fields. // In order to do that we have to have its field `a` in memory. Right now lowering cannot // handle RETURN struct(multiple registers)->SIMD16(one register), but it can be improved. 
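    // Examine the single promoted field: a SIMD-typed field blocks the replacement for now.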
    LclVarDsc* fieldDsc = comp->lvaGetDesc(lvFieldLclStart);
    if (varTypeIsSIMD(fieldDsc))
    {
        return false;
    }
#endif // FEATURE_SIMD

    return true;
}

//------------------------------------------------------------------------
// lvaMarkLclRefs: increment local var references counts and more
//
// Arguments:
//     tree - some node in a tree
//     block - block that the tree node belongs to
//     stmt - stmt that the tree node belongs to
//     isRecompute - true if we should just recompute counts
//
// Notes:
//     Invoked via the MarkLocalVarsVisitor
//
//     Primarily increments the regular and weighted local var ref
//     counts for any local referred to directly by tree.
//
//     Also:
//
//     Accounts for implicit references to frame list root for
//     pinvokes that will be expanded later.
//
//     Determines if locals of TYP_BOOL can safely be considered
//     to hold only 0 or 1 or may have a broader range of true values.
//
//     Does some setup work for assertion prop, noting locals that are
//     eligible for assertion prop, single defs, and tracking which blocks
//     hold uses.
//
//     Looks for uses of generic context and sets lvaGenericsContextInUse.
//
//     In checked builds:
//
//     Verifies that local accesses are consistently typed.
//     Verifies that casts remain in bounds.
//
void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute)
{
    const weight_t weight = block->getBBWeight(this);

    /* Is this a call to unmanaged code ? */
    if (tree->IsCall() && compMethodRequiresPInvokeFrame())
    {
        assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
        if (!opts.ShouldUsePInvokeHelpers())
        {
            /* Get the special variable descriptor */
            LclVarDsc* varDsc = lvaGetDesc(info.compLvFrameListRoot);

            /* Increment the ref counts twice */
            varDsc->incRefCnts(weight, this);
            varDsc->incRefCnts(weight, this);
        }
    }

    if (!isRecompute)
    {
        /* Is this an assignment? */
        if (tree->OperIs(GT_ASG))
        {
            GenTree* op1 = tree->AsOp()->gtOp1;
            GenTree* op2 = tree->AsOp()->gtOp2;

            /* Is this an assignment to a local variable? */
            if (op1->gtOper == GT_LCL_VAR && op2->gtType != TYP_BOOL)
            {
                /* Only simple assignments allowed for booleans */
                if (tree->gtOper != GT_ASG)
                {
                    goto NOT_BOOL;
                }

                /* Is the RHS clearly a boolean value? */
                switch (op2->gtOper)
                {
                    unsigned lclNum;

                    case GT_CNS_INT:

                        if (op2->AsIntCon()->gtIconVal == 0)
                        {
                            break;
                        }
                        if (op2->AsIntCon()->gtIconVal == 1)
                        {
                            break;
                        }

                        // Not 0 or 1, fall through ....
                        FALLTHROUGH;

                    default:

                        if (op2->OperIsCompare())
                        {
                            break;
                        }

                    NOT_BOOL:

                        lclNum = op1->AsLclVarCommon()->GetLclNum();
                        noway_assert(lclNum < lvaCount);

                        lvaTable[lclNum].lvIsBoolean = false;
                        break;
                }
            }
        }
    }

    if (tree->OperIsLocalAddr())
    {
        LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
        assert(varDsc->IsAddressExposed());
        varDsc->incRefCnts(weight, this);
        return;
    }

    if ((tree->gtOper != GT_LCL_VAR) && (tree->gtOper != GT_LCL_FLD))
    {
        return;
    }

    /* This must be a local variable reference */

    // See if this is a generics context use.
    if ((tree->gtFlags & GTF_VAR_CONTEXT) != 0)
    {
        assert(tree->OperIs(GT_LCL_VAR));
        if (!lvaGenericsContextInUse)
        {
            JITDUMP("-- generic context in use at [%06u]\n", dspTreeID(tree));
            lvaGenericsContextInUse = true;
        }
    }

    assert((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD));
    unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    /* Increment the reference counts */

    varDsc->incRefCnts(weight, this);

#ifdef DEBUG
    if (varDsc->lvIsStructField)
    {
        // If the ref count was incremented for a struct field, ensure that the
        // parent struct is still promoted.
        LclVarDsc* parentStruct = lvaGetDesc(varDsc->lvParentLcl);
        assert(!parentStruct->lvUndoneStructPromotion);
    }
#endif

    if (!isRecompute)
    {
        if (lvaVarAddrExposed(lclNum))
        {
            varDsc->lvIsBoolean = false;
        }

        if (tree->gtOper == GT_LCL_FLD)
        {
            // variables that have uses inside a GT_LCL_FLD
            // cause problems, so we will disqualify them here
            varDsc->lvaDisqualifyVar();
            return;
        }

        if (fgDomsComputed && IsDominatedByExceptionalEntry(block))
        {
            SetVolatileHint(varDsc);
        }

        /* Record if the variable has a single def or not */

        if (!varDsc->lvDisqualify) // If this variable is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                /*
                   If we have one of these cases:
                       1.    We have already seen a definition (i.e. lvSingleDef is true)
                       2. or info.compInitMem is true (thus this would be the second definition)
                       3. or we have an assignment inside QMARK-COLON trees
                       4. or we have an update form of assignment (i.e. +=, -=, *=)
                   Then we must disqualify this variable for use in optAddCopies()

                   Note that all parameters start out with lvSingleDef set to true
                */
                if ((varDsc->lvSingleDef == true) || (info.compInitMem == true) || (tree->gtFlags & GTF_COLON_COND) ||
                    (tree->gtFlags & GTF_VAR_USEASG))
                {
                    varDsc->lvaDisqualifyVar();
                }
                else
                {
                    varDsc->lvSingleDef = true;
                    varDsc->lvDefStmt   = stmt;
                }
            }
            else // otherwise this is a ref of our variable
            {
                if (BlockSetOps::MayBeUninit(varDsc->lvRefBlks))
                {
                    // Lazy initialization
                    BlockSetOps::AssignNoCopy(this, varDsc->lvRefBlks, BlockSetOps::MakeEmpty(this));
                }

                BlockSetOps::AddElemD(this, varDsc->lvRefBlks, block->bbNum);
            }
        }

        if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                bool bbInALoop  = (block->bbFlags & BBF_BACKWARD_JUMP) != 0;
                bool bbIsReturn = block->bbJumpKind == BBJ_RETURN;
                // TODO: Zero-inits in LSRA are created with the below condition. But if we filter based on that
                // condition, we filter out a lot of interesting variables that would otherwise benefit from EH var
                // enregistration.
                // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem ||
                // varTypeIsGC(varDsc->TypeGet()));
                bool needsExplicitZeroInit = fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn);

                if (varDsc->lvSingleDefRegCandidate || needsExplicitZeroInit)
                {
#ifdef DEBUG
                    if (needsExplicitZeroInit)
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'Z';
                        JITDUMP("V%02u needs explicit zero init. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }
                    else
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'M';
                        JITDUMP("V%02u has multiple definitions. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }
#endif // DEBUG
                    varDsc->lvSingleDefRegCandidate           = false;
                    varDsc->lvDisqualifySingleDefRegCandidate = true;
                }
                else
                {
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
                    // TODO-CQ: If the varType needs partial callee save, conservatively do not enregister
                    // such a variable. In the future, we need to enable enregistration for such variables.
                    if (!varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
#endif
                    {
                        varDsc->lvSingleDefRegCandidate = true;
                        JITDUMP("Marking EH Var V%02u as a register candidate.\n", lclNum);
                    }
                }
            }
        }

        bool allowStructs = false;
#ifdef UNIX_AMD64_ABI
        // On System V the type of the var could be a struct type.
        allowStructs = varTypeIsStruct(varDsc);
#endif // UNIX_AMD64_ABI

        /* Variables must be used as the same type throughout the method */
        noway_assert(varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
                     genActualType(varDsc->TypeGet()) == genActualType(tree->gtType) ||
                     (tree->gtType == TYP_BYREF && varDsc->TypeGet() == TYP_I_IMPL) ||
                     (tree->gtType == TYP_I_IMPL && varDsc->TypeGet() == TYP_BYREF) || (tree->gtFlags & GTF_VAR_CAST) ||
                     (varTypeIsFloating(varDsc) && varTypeIsFloating(tree)) ||
                     (varTypeIsStruct(varDsc) == varTypeIsStruct(tree)));

        /* Remember the type of the reference */
        if (tree->gtType == TYP_UNKNOWN || varDsc->lvType == TYP_UNDEF)
        {
            varDsc->lvType = tree->gtType;
            noway_assert(genActualType(varDsc->TypeGet()) == tree->gtType); // no truncation
        }

#ifdef DEBUG
        if (tree->gtFlags & GTF_VAR_CAST)
        {
            // it should never be bigger than the variable slot

            // Trees don't store the full information about structs
            // so we can't check them.
            if (tree->TypeGet() != TYP_STRUCT)
            {
                unsigned treeSize = genTypeSize(tree->TypeGet());
                unsigned varSize  = genTypeSize(varDsc->TypeGet());
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    varSize = varDsc->lvSize();
                }

                assert(treeSize <= varSize);
            }
        }
#endif
    }
}

//------------------------------------------------------------------------
// IsDominatedByExceptionalEntry: Check whether the block is dominated by an exception entry block.
//
// Arguments:
//    block - the block to check.
//
bool Compiler::IsDominatedByExceptionalEntry(BasicBlock* block)
{
    assert(fgDomsComputed);
    return block->IsDominatedByExceptionalEntryFlag();
}

//------------------------------------------------------------------------
// SetVolatileHint: Set a local var's volatile hint.
//
// Arguments:
//    varDsc - the local variable that needs the hint.
//
void Compiler::SetVolatileHint(LclVarDsc* varDsc)
{
    varDsc->lvVolatileHint = true;
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: update local var ref counts for IR in a basic block
//
// Arguments:
//    block - the block in question
//    isRecompute - true if counts are being recomputed
//
// Notes:
//    Invokes lvaMarkLclRefs on each tree node for each
//    statement in the block.
//
void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute)
{
    class MarkLocalVarsVisitor final : public GenTreeVisitor<MarkLocalVarsVisitor>
    {
    private:
        BasicBlock* m_block;
        Statement*  m_stmt;
        bool        m_isRecompute;

    public:
        enum
        {
            DoPreOrder = true,
        };

        MarkLocalVarsVisitor(Compiler* compiler, BasicBlock* block, Statement* stmt, bool isRecompute)
            : GenTreeVisitor<MarkLocalVarsVisitor>(compiler), m_block(block), m_stmt(stmt), m_isRecompute(isRecompute)
        {
        }

        Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
        {
            // TODO: Stop passing isRecompute once we are sure that this assert is never hit.
            assert(!m_isRecompute);
            m_compiler->lvaMarkLclRefs(*use, m_block, m_stmt, m_isRecompute);
            return WALK_CONTINUE;
        }
    };

    JITDUMP("\n*** %s local variables in block " FMT_BB " (weight=%s)\n", isRecompute ? "recomputing" : "marking",
            block->bbNum, refCntWtd2str(block->getBBWeight(this)));

    for (Statement* const stmt : block->NonPhiStatements())
    {
        MarkLocalVarsVisitor visitor(this, block, stmt, isRecompute);
        DISPSTMT(stmt);
        visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
    }
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: enable normal ref counting, compute initial counts, sort locals table
//
// Notes:
//    Now behaves differently in minopts / debug.
Instead of actually inspecting // the IR and counting references, the jit assumes all locals are referenced // and does not sort the locals table. // // Also, when optimizing, lays the groundwork for assertion prop and more. // See details in lvaMarkLclRefs. void Compiler::lvaMarkLocalVars() { JITDUMP("\n*************** In lvaMarkLocalVars()"); // If we have direct pinvokes, verify the frame list root local was set up properly if (compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) { noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount); } } #if !defined(FEATURE_EH_FUNCLETS) // Grab space for exception handling if (ehNeedsShadowSPslots()) { // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) // ie. the offset of the end-of-last-executed-filter unsigned slotsNeeded = 1; unsigned handlerNestingLevel = ehMaxHndNestingCount; if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; slotsNeeded += handlerNestingLevel; // For a filter (which can be active at the same time as a catch/finally handler) slotsNeeded++; // For zero-termination of the shadow-Stack-pointer chain slotsNeeded++; lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); LclVarDsc* shadowSPslotsVar = lvaGetDesc(lvaShadowSPslotsVar); shadowSPslotsVar->lvType = TYP_BLK; shadowSPslotsVar->lvExactSize = (slotsNeeded * TARGET_POINTER_SIZE); } #endif // !FEATURE_EH_FUNCLETS // PSPSym and LocAllocSPvar are not used by the CoreRT ABI if (!IsTargetAbi(CORINFO_CORERT_ABI)) { #if defined(FEATURE_EH_FUNCLETS) if (ehNeedsPSPSym()) { lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); LclVarDsc* lclPSPSym = lvaGetDesc(lvaPSPSym); lclPSPSym->lvType = TYP_I_IMPL; lvaSetVarDoNotEnregister(lvaPSPSym DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER // LocAllocSPvar is only required by the implicit frame layout expected by the VM on x86. Whether // a function contains a Localloc is conveyed in the GC information, in the InfoHdrSmall.localloc // field. The function must have an EBP frame. Then, the VM finds the LocAllocSP slot by assuming // the following stack layout: // // -- higher addresses -- // saved EBP <-- EBP points here // other callee-saved registers // InfoHdrSmall.savedRegsCountExclFP specifies this size // optional GS cookie // InfoHdrSmall.security is 1 if this exists // LocAllocSP slot // -- lower addresses -- // // See also eetwain.cpp::GetLocallocSPOffset() and its callers. if (compLocallocUsed) { lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar")); LclVarDsc* locAllocSPvar = lvaGetDesc(lvaLocAllocSPvar); locAllocSPvar->lvType = TYP_I_IMPL; } #endif // JIT32_GCENCODER } // Ref counting is now enabled normally. lvaRefCountState = RCS_NORMAL; #if defined(DEBUG) const bool setSlotNumbers = true; #else const bool setSlotNumbers = opts.compScopeInfo && (info.compVarScopesCount > 0); #endif // defined(DEBUG) const bool isRecompute = false; lvaComputeRefCounts(isRecompute, setSlotNumbers); // If we don't need precise reference counts, e.g. we're not optimizing, we're done. if (!PreciseRefCountsRequired()) { return; } const bool reportParamTypeArg = lvaReportParamTypeArg(); // Update bookkeeping on the generic context. 
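    // Mark whichever local carries the generics context ("this" or the explicit
    // context arg) as implicitly referenced when it must be reported.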
if (lvaKeepAliveAndReportThis()) { lvaGetDesc(0u)->lvImplicitlyReferenced = reportParamTypeArg; } else if (lvaReportParamTypeArg()) { // We should have a context arg. assert(info.compTypeCtxtArg != (int)BAD_VAR_NUM); lvaGetDesc(info.compTypeCtxtArg)->lvImplicitlyReferenced = reportParamTypeArg; } assert(PreciseRefCountsRequired()); // Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above. optAddCopies(); } //------------------------------------------------------------------------ // lvaComputeRefCounts: compute ref counts for locals // // Arguments: // isRecompute -- true if we just want ref counts and no other side effects; // false means to also look for true boolean locals, lay // groundwork for assertion prop, check type consistency, etc. // See lvaMarkLclRefs for details on what else goes on. // setSlotNumbers -- true if local slot numbers should be assigned. // // Notes: // Some implicit references are given actual counts or weight bumps here // to match pre-existing behavior. // // In fast-jitting modes where we don't ref count locals, this bypasses // actual counting, and makes all locals implicitly referenced on first // compute. It asserts all locals are implicitly referenced on recompute. // // When optimizing we also recompute lvaGenericsContextInUse based // on specially flagged LCL_VAR appearances. // void Compiler::lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers) { JITDUMP("\n*** lvaComputeRefCounts ***\n"); unsigned lclNum = 0; LclVarDsc* varDsc = nullptr; // Fast path for minopts and debug codegen. // // On first compute: mark all locals as implicitly referenced and untracked. // On recompute: do nothing. if (!PreciseRefCountsRequired()) { if (isRecompute) { #if defined(DEBUG) // All local vars should be marked as implicitly referenced // and not tracked. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (isSpecialVarargsParam) { assert(varDsc->lvRefCnt() == 0); } else { assert(varDsc->lvImplicitlyReferenced); } assert(!varDsc->lvTracked); } #endif // defined (DEBUG) return; } // First compute. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { // Using lvImplicitlyReferenced here ensures that we can't // accidentally make locals be unreferenced later by decrementing // the ref count to zero. // // If, in minopts/debug, we really want to allow locals to become // unreferenced later, we'll have to explicitly clear this bit. varDsc->setLvRefCnt(0); varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT); // Special case for some varargs params ... these must // remain unreferenced. const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (!isSpecialVarargsParam) { varDsc->lvImplicitlyReferenced = 1; } varDsc->lvTracked = 0; if (setSlotNumbers) { varDsc->lvSlotNum = lclNum; } // Assert that it's ok to bypass the type repair logic in lvaMarkLclRefs assert((varDsc->lvType != TYP_UNDEF) && (varDsc->lvType != TYP_VOID) && (varDsc->lvType != TYP_UNKNOWN)); } lvaCurEpoch++; lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; return; } // Slower path we take when optimizing, to get accurate counts. // // First, reset all explicit ref counts and weights. 
    for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
    {
        varDsc->setLvRefCnt(0);
        varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT);

        if (setSlotNumbers)
        {
            varDsc->lvSlotNum = lclNum;
        }

        // Set initial value for lvSingleDef for explicit and implicit
        // argument locals as they are "defined" on entry.
        // However, if we are just recomputing the ref counts, retain the value
        // that was set by past phases.
        if (!isRecompute)
        {
            varDsc->lvSingleDef             = varDsc->lvIsParam;
            varDsc->lvSingleDefRegCandidate = varDsc->lvIsParam;
        }
    }

    // Remember current state of generic context use, and prepare
    // to compute new state.
    const bool oldLvaGenericsContextInUse = lvaGenericsContextInUse;
    lvaGenericsContextInUse               = false;

    JITDUMP("\n*** lvaComputeRefCounts -- explicit counts ***\n");

    // Second, account for all explicit local variable references
    for (BasicBlock* const block : Blocks())
    {
        if (block->IsLIR())
        {
            assert(isRecompute);

            const weight_t weight = block->getBBWeight(this);
            for (GenTree* node : LIR::AsRange(block))
            {
                switch (node->OperGet())
                {
                    case GT_LCL_VAR:
                    case GT_LCL_FLD:
                    case GT_LCL_VAR_ADDR:
                    case GT_LCL_FLD_ADDR:
                    case GT_STORE_LCL_VAR:
                    case GT_STORE_LCL_FLD:
                    {
                        LclVarDsc* varDsc = lvaGetDesc(node->AsLclVarCommon());
                        // If this is an EH var, use a zero weight for defs, so that we don't
                        // count those in our heuristic for register allocation, since they always
                        // must be stored, so there's no value in enregistering them at defs; only
                        // if there are enough uses to justify it.
                        if (varDsc->lvLiveInOutOfHndlr && !varDsc->lvDoNotEnregister &&
                            ((node->gtFlags & GTF_VAR_DEF) != 0))
                        {
                            varDsc->incRefCnts(0, this);
                        }
                        else
                        {
                            varDsc->incRefCnts(weight, this);
                        }

                        if ((node->gtFlags & GTF_VAR_CONTEXT) != 0)
                        {
                            assert(node->OperIs(GT_LCL_VAR));
                            lvaGenericsContextInUse = true;
                        }
                        break;
                    }

                    default:
                        break;
                }
            }
        }
        else
        {
            lvaMarkLocalVars(block, isRecompute);
        }
    }

    if (oldLvaGenericsContextInUse && !lvaGenericsContextInUse)
    {
        // Context was in use but no longer is. This can happen
        // if we're able to optimize, so just leave a note.
        JITDUMP("\n** Generics context no longer in use\n");
    }
    else if (lvaGenericsContextInUse && !oldLvaGenericsContextInUse)
    {
        // Context was not in use but now is.
        //
        // Changing from unused->used should never happen; creation of any new IR
        // for context use should also be setting lvaGenericsContextInUse.
        assert(!"unexpected new use of generics context");
    }

    JITDUMP("\n*** lvaComputeRefCounts -- implicit counts ***\n");

    // Third, bump ref counts for some implicit prolog references
    for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
    {
        // Todo: review justification for these count bumps.
        if (varDsc->lvIsRegArg)
        {
            if ((lclNum < info.compArgsCount) && (varDsc->lvRefCnt() > 0))
            {
                // Fix 388376 ARM JitStress WP7
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
            }

            // Ref count bump that was in lvaPromoteStructVar
            //
            // This was formerly done during RCS_EARLY counting,
            // and we did not previously reset counts like we do now.
            if (varDsc->lvIsStructField)
            {
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
            }
        }

        // If we have JMP, all arguments must have a location
        // even if we don't use them inside the method
        if (compJmpOpUsed && varDsc->lvIsParam && (varDsc->lvRefCnt() == 0))
        {
            // except when we have varargs and the argument is
            // passed on the stack.  In that case, it's important
            // for the ref count to be zero, so that we don't attempt
            // to track them for GC info (which is not possible since we
            // don't know their offset in the stack).
            // See the assert at the end of raMarkStkVars and bug #28949 for more info.
            if (!raIsVarargsStackArg(lclNum))
            {
                varDsc->lvImplicitlyReferenced = 1;
            }
        }
    }
}

void Compiler::lvaAllocOutgoingArgSpaceVar()
{
#if FEATURE_FIXED_OUT_ARGS

    // Set up the outgoing argument region, in case we end up using it later

    if (lvaOutgoingArgSpaceVar == BAD_VAR_NUM)
    {
        lvaOutgoingArgSpaceVar = lvaGrabTemp(false DEBUGARG("OutgoingArgSpace"));

        lvaTable[lvaOutgoingArgSpaceVar].lvType                 = TYP_LCLBLK;
        lvaTable[lvaOutgoingArgSpaceVar].lvImplicitlyReferenced = 1;
    }

    noway_assert(lvaOutgoingArgSpaceVar >= info.compLocalsCount && lvaOutgoingArgSpaceVar < lvaCount);

#endif // FEATURE_FIXED_OUT_ARGS
}

inline void Compiler::lvaIncrementFrameSize(unsigned size)
{
    if (size > MAX_FrameSize || compLclFrameSize + size > MAX_FrameSize)
    {
        BADCODE("Frame size overflow");
    }

    compLclFrameSize += size;
}

/****************************************************************************
 *
 *  Return true if absolute offsets of temps are larger than vars, or in other
 *  words, did we allocate temps before or after vars. The /GS buffer overrun
 *  checks want temps to be at lower stack addresses than buffers.
 */
bool Compiler::lvaTempsHaveLargerOffsetThanVars()
{
#ifdef TARGET_ARM
    // We never want to place the temps with larger offsets for ARM
    return false;
#else
    if (compGSReorderStackLayout)
    {
        return codeGen->isFramePointerUsed();
    }
    else
    {
        return true;
    }
#endif
}

/****************************************************************************
 *
 *  Return an upper bound estimate for the size of the compiler spill temps
 *
 */
unsigned Compiler::lvaGetMaxSpillTempSize()
{
    unsigned result = 0;

    if (codeGen->regSet.hasComputedTmpSize())
    {
        result = codeGen->regSet.tmpGetTotalSize();
    }
    else
    {
        result = MAX_SPILL_TEMP_SIZE;
    }
    return result;
}

// clang-format off
/*****************************************************************************
 *
 *  Compute stack frame offsets for arguments, locals and optionally temps.
* * The frame is laid out as follows for x86: * * ESP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * |Callee saved registers | * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| <---- Ambient ESP * | Arguments for the | * ~ next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * EBP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * | incoming EBP | * |-----------------------| <---- EBP * |Callee saved registers | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | Last-executed-filter | * |-----------------------| * | | * ~ Shadow SPs ~ * | | * |-----------------------| * | | * ~ Variables ~ * | | * ~-----------------------| * | Temps | * |-----------------------| * | localloc | * |-----------------------| <---- Ambient ESP * | Arguments for the | * | next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * The frame is laid out as follows for x64: * * RSP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | <---- this padding (0 or 8 bytes) is to ensure flt registers are saved at a mem location aligned at 16-bytes * | | so that we can save 128-bit callee saved xmm regs using performant "movaps" instruction instead of "movups" * ------------------------- * | Callee saved Flt regs | <----- entire 128-bits of callee saved xmm registers are stored here * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP * | | | * ~ | Stack grows ~ * | | downward | * V * * * RBP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | * ------------------------- * | Callee saved Flt regs | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | | * | | * ~ Variables ~ * | | * | | * |-----------------------| * | Temps | * |-----------------------| * | | * ~ localloc ~ // not in frames with EH * | | * |-----------------------| * | PSPSym | // only in frames with EH (thus no localloc) * | | * |-----------------------| <---- RBP in localloc frames (max 240 bytes from Initial-SP) * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP (before localloc, this is Initial-SP) * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM (this is a general picture; details may differ for different conditions): * * SP frames * | | * 
|-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP / R11 frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which means FP-based frames * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | localloc | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM64 (this is a general picture; details may differ for different conditions): * NOTE: SP must be 16-byte aligned, so there may be alignment slots in the frame. * We will often save and establish a frame pointer to create better ETW stack walks. 
* * SP frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames where FP/LR are stored at the top of the frame (frames requiring GS that have localloc) * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * Doing this all in one pass is 'hard'. So instead we do it in 2 basic passes: * 1. Assign all the offsets relative to the Virtual '0'. 
Offsets above (the
 *     incoming arguments) are positive. Offsets below (everything else) are
 *     negative. This pass also calculates the total frame size (between Caller's
 *     SP/return address and the Ambient SP).
 *  2. Figure out where to place the frame pointer, and then adjust the offsets
 *     as needed for the final stack size and whether the offset is frame pointer
 *     relative or stack pointer relative.
 *
 */
// clang-format on

void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
{
    noway_assert((lvaDoneFrameLayout < curState) || (curState == REGALLOC_FRAME_LAYOUT));

    lvaDoneFrameLayout = curState;

#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In lvaAssignFrameOffsets");
        if (curState == INITIAL_FRAME_LAYOUT)
        {
            printf("(INITIAL_FRAME_LAYOUT)");
        }
        else if (curState == PRE_REGALLOC_FRAME_LAYOUT)
        {
            printf("(PRE_REGALLOC_FRAME_LAYOUT)");
        }
        else if (curState == REGALLOC_FRAME_LAYOUT)
        {
            printf("(REGALLOC_FRAME_LAYOUT)");
        }
        else if (curState == TENTATIVE_FRAME_LAYOUT)
        {
            printf("(TENTATIVE_FRAME_LAYOUT)");
        }
        else if (curState == FINAL_FRAME_LAYOUT)
        {
            printf("(FINAL_FRAME_LAYOUT)");
        }
        else
        {
            printf("(UNKNOWN)");
            unreached();
        }
        printf("\n");
    }
#endif

#if FEATURE_FIXED_OUT_ARGS
    assert(lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
#endif // FEATURE_FIXED_OUT_ARGS

    /*-------------------------------------------------------------------------
     *
     * First process the arguments.
     *
     *-------------------------------------------------------------------------
     */

    lvaAssignVirtualFrameOffsetsToArgs();

    /*-------------------------------------------------------------------------
     *
     * Now compute stack offsets for any variables that don't live in registers
     *
     *-------------------------------------------------------------------------
     */

    lvaAssignVirtualFrameOffsetsToLocals();

    lvaAlignFrame();

    /*-------------------------------------------------------------------------
     *
     * Now patch the offsets
     *
     *-------------------------------------------------------------------------
     */

    lvaFixVirtualFrameOffsets();

    // Modify the stack offset for fields of promoted structs.
    lvaAssignFrameOffsetsToPromotedStructs();

    /*-------------------------------------------------------------------------
     *
     * Finalize
     *
     *-------------------------------------------------------------------------
     */

    // If it's not the final frame layout, then it's just an estimate. This means
    // we're allowed to once again write to these variables, even if we've read
    // from them to make tentative code generation or frame layout decisions.
    if (curState < FINAL_FRAME_LAYOUT)
    {
        codeGen->resetFramePointerUsedWritePhase();
    }
}

/*****************************************************************************
 *  lvaFixVirtualFrameOffsets() : Now that everything has a virtual offset,
 *  determine the final value for the frame pointer (if needed) and then
 *  adjust all the offsets appropriately.
 *
 *  This routine fixes each virtual offset to be relative to the frame pointer or SP,
 *  based on whether varDsc->lvFramePointerBased is true or false, respectively.
 */
void Compiler::lvaFixVirtualFrameOffsets()
{
    LclVarDsc* varDsc;

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64)
    if (lvaPSPSym != BAD_VAR_NUM)
    {
        // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space.
        // Without this code, lvaAlignFrame might have put the padding lower than the PSPSym, which would be between
        // the PSPSym and the outgoing argument space.
        varDsc = lvaGetDesc(lvaPSPSym);
        assert(varDsc->lvFramePointerBased); // We always access it RBP-relative.
assert(!varDsc->lvMustInit); // It is never "must init". varDsc->SetStackOffset(codeGen->genCallerSPtoInitialSPdelta() + lvaLclSize(lvaOutgoingArgSpaceVar)); if (opts.IsOSR()) { // With OSR RBP points at the base of the OSR frame, but the virtual offsets // are from the base of the Tier0 frame. Adjust. // varDsc->SetStackOffset(varDsc->GetStackOffset() - info.compPatchpointInfo->TotalFrameSize()); } } #endif // The delta to be added to virtual offset to adjust it relative to frame pointer or SP int delta = 0; #ifdef TARGET_XARCH delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64 JITDUMP("--- delta bump %d for RA\n", REGSIZE_BYTES); if (codeGen->doubleAlignOrFramePointerUsed()) { JITDUMP("--- delta bump %d for FP\n", REGSIZE_BYTES); delta += REGSIZE_BYTES; // pushed EBP (frame pointer) } #endif if (!codeGen->isFramePointerUsed()) { // pushed registers, return address, and padding JITDUMP("--- delta bump %d for RSP frame\n", codeGen->genTotalFrameSize()); delta += codeGen->genTotalFrameSize(); } #if defined(TARGET_ARM) else { // We set FP to be after LR, FP delta += 2 * REGSIZE_BYTES; } #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) else { // FP is used. JITDUMP("--- delta bump %d for FP frame\n", codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta()); delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta(); } #endif // TARGET_AMD64 if (opts.IsOSR()) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Stack offset includes Tier0 frame. // JITDUMP("--- delta bump %d for OSR + Tier0 frame\n", info.compPatchpointInfo->TotalFrameSize()); delta += info.compPatchpointInfo->TotalFrameSize(); #endif } JITDUMP("--- virtual stack offset to actual stack offset delta is %d\n", delta); unsigned lclNum; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { bool doAssignStkOffs = true; // Can't be relative to EBP unless we have an EBP noway_assert(!varDsc->lvFramePointerBased || codeGen->doubleAlignOrFramePointerUsed()); // Is this a non-param promoted struct field? // if so then set doAssignStkOffs to false. // if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); #if defined(TARGET_X86) // On x86, we set the stack offset for a promoted field // to match a struct parameter in lvAssignFrameOffsetsToPromotedStructs. 
            if ((!varDsc->lvIsParam || parentvarDsc->lvIsParam) && promotionType == PROMOTION_TYPE_DEPENDENT)
#else
            if (!varDsc->lvIsParam && promotionType == PROMOTION_TYPE_DEPENDENT)
#endif
            {
                doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs()
            }
        }

        if (!varDsc->lvOnFrame)
        {
            if (!varDsc->lvIsParam
#if !defined(TARGET_AMD64)
                || (varDsc->lvIsRegArg
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
                    && compIsProfilerHookNeeded() &&
                    !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need to assign stack
                                                                                        // offsets for prespilled
                                                                                        // arguments
#endif
                    )
#endif // !defined(TARGET_AMD64)
                    )
            {
                doAssignStkOffs = false; // Not on frame or an incoming stack arg
            }
        }

        if (doAssignStkOffs)
        {
            JITDUMP("-- V%02u was %d, now %d\n", lclNum, varDsc->GetStackOffset(), varDsc->GetStackOffset() + delta);
            varDsc->SetStackOffset(varDsc->GetStackOffset() + delta);

#if DOUBLE_ALIGN
            if (genDoubleAlign() && !codeGen->isFramePointerUsed())
            {
                if (varDsc->lvFramePointerBased)
                {
                    varDsc->SetStackOffset(varDsc->GetStackOffset() - delta);

                    // We need to re-adjust the offsets of the parameters so they are EBP
                    // relative rather than stack/frame pointer relative

                    varDsc->SetStackOffset(varDsc->GetStackOffset() + (2 * TARGET_POINTER_SIZE)); // return address and
                                                                                                  // pushed EBP

                    noway_assert(varDsc->GetStackOffset() >= FIRST_ARG_STACK_OFFS);
                }
            }
#endif
            // On System V environments the stkOffs could be 0 for params passed in registers.
            //
            // For normal methods only EBP relative references can have negative offsets.
            assert(codeGen->isFramePointerUsed() || varDsc->GetStackOffset() >= 0);
        }
    }

    assert(codeGen->regSet.tmpAllFree());
    for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp))
    {
        temp->tdAdjustTempOffs(delta);
    }

    lvaCachedGenericContextArgOffs += delta;

#if FEATURE_FIXED_OUT_ARGS

    if (lvaOutgoingArgSpaceVar != BAD_VAR_NUM)
    {
        varDsc = lvaGetDesc(lvaOutgoingArgSpaceVar);
        varDsc->SetStackOffset(0);
        varDsc->lvFramePointerBased = false;
        varDsc->lvMustInit          = false;
    }

#endif // FEATURE_FIXED_OUT_ARGS

#ifdef TARGET_ARM64
    // We normally add alignment below the locals between them and the outgoing
    // arg space area. When we store fp/lr at the bottom, however, this will be
    // below the alignment. So we should not apply the alignment adjustment to
    // them. On ARM64 it turns out we always store these at +0 and +8 of the FP,
    // so instead of dealing with skipping adjustment just for them we just set
    // them here always.
    assert(codeGen->isFramePointerUsed());
    if (lvaRetAddrVar != BAD_VAR_NUM)
    {
        lvaTable[lvaRetAddrVar].SetStackOffset(REGSIZE_BYTES);
    }
#endif
}

#ifdef TARGET_ARM
bool Compiler::lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask)
{
    const LclVarDsc& desc = lvaTable[lclNum];
    return desc.lvIsRegArg && (preSpillMask & genRegMask(desc.GetArgReg()));
}
#endif // TARGET_ARM

//------------------------------------------------------------------------
// lvaUpdateArgWithInitialReg: Set the initial register of a local variable
// to the one assigned by the register allocator.
//
// Arguments:
//    varDsc - the local variable descriptor
//
void Compiler::lvaUpdateArgWithInitialReg(LclVarDsc* varDsc)
{
    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegCandidate())
    {
        varDsc->SetRegNum(varDsc->GetArgInitReg());
    }
}

//------------------------------------------------------------------------
// lvaUpdateArgsWithInitialReg() : For each argument variable descriptor, update
// its current register with the initial register as assigned by LSRA.
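// Promoted struct parameters are updated per-field, since each field has its own descriptor.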
// void Compiler::lvaUpdateArgsWithInitialReg() { if (!compLSRADone) { return; } for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromotedStruct()) { for (unsigned fieldVarNum = varDsc->lvFieldLclStart; fieldVarNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldVarNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldVarNum); lvaUpdateArgWithInitialReg(fieldVarDsc); } } else { lvaUpdateArgWithInitialReg(varDsc); } } } /***************************************************************************** * lvaAssignVirtualFrameOffsetsToArgs() : Assign virtual stack offsets to the * arguments, and implicit arguments (this ptr, return buffer, generics, * and varargs). */ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() { unsigned lclNum = 0; int argOffs = 0; #ifdef UNIX_AMD64_ABI int callerArgOffset = 0; #endif // UNIX_AMD64_ABI /* Assign stack offsets to arguments (in reverse order of passing). This means that if we pass arguments left->right, we start at the end of the list and work backwards, for right->left we start with the first argument and move forward. This is all relative to our Virtual '0' */ if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs = compArgSize; } /* Update the argOffs to reflect arguments that are passed in registers */ noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG); noway_assert(compMacOsArm64Abi() || compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; } // Update the arg initial register locations. lvaUpdateArgsWithInitialReg(); /* Is there a "this" argument? */ if (!info.compIsStatic) { noway_assert(lclNum == info.compThisArg); #ifndef TARGET_X86 argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); #endif // TARGET_X86 lclNum++; } unsigned userArgsToSkip = 0; #if !defined(TARGET_ARM) // In the native instance method calling convention on Windows, // the this parameter comes before the hidden return buffer parameter. // So, we want to process the native "this" parameter before we process // the native return buffer parameter. 
if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { #ifdef TARGET_X86 if (!lvaTable[lclNum].lvIsRegArg) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } #elif !defined(UNIX_AMD64_ABI) argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; userArgsToSkip++; } #endif /* if we have a hidden buffer parameter, that comes here */ if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(lclNum == info.compRetBuffArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); lclNum++; } #if USER_ARGS_COME_LAST //@GENERICS: extra argument for instantiation info if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { noway_assert(lclNum == (unsigned)info.compTypeCtxtArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } if (info.compIsVarArgs) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } #endif // USER_ARGS_COME_LAST CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; unsigned argSigLen = info.compMethodInfo->args.numArgs; // Skip any user args that we've already processed. assert(userArgsToSkip <= argSigLen); argSigLen -= userArgsToSkip; for (unsigned i = 0; i < userArgsToSkip; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } #ifdef TARGET_ARM // // struct_n { int; int; ... n times }; // // Consider signature: // // Foo (float a,double b,float c,double d,float e,double f,float g,double h, // float i,double j,float k,double l,struct_3 m) { } // // Basically the signature is: (all float regs full, 1 double, struct_3); // // The double argument occurs before pre spill in the argument iteration and // computes an argOffset of 0. struct_3 offset becomes 8. This is wrong. // Because struct_3 is prespilled and double occurs after prespill. // The correct offsets are double = 16 (aligned stk), struct_3 = 0..12, // Offset 12 will be skipped for double alignment of double. // // Another example is (struct_2, all float regs full, double, struct_2); // Here, notice the order is similarly messed up because of 2 pre-spilled // struct_2. // // Succinctly, // ARG_INDEX(i) > ARG_INDEX(j) DOES NOT IMPLY |ARG_OFFSET(i)| > |ARG_OFFSET(j)| // // Therefore, we'll do a two pass offset calculation, one that considers pre-spill // and the next, stack args. // unsigned argLcls = 0; // Take care of pre spill registers first. regMaskTP preSpillMask = codeGen->regSet.rsMaskPreSpillRegs(false); regMaskTP tempMask = RBM_NONE; for (unsigned i = 0, preSpillLclNum = lclNum; i < argSigLen; ++i, ++preSpillLclNum) { if (lvaIsPreSpilled(preSpillLclNum, preSpillMask)) { unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); argOffs = lvaAssignVirtualFrameOffsetToArg(preSpillLclNum, argSize, argOffs); argLcls++; // Early out if we can. If size is 8 and base reg is 2, then the mask is 0x1100 tempMask |= ((((1 << (roundUp(argSize, TARGET_POINTER_SIZE) / REGSIZE_BYTES))) - 1) << lvaTable[preSpillLclNum].GetArgReg()); if (tempMask == preSpillMask) { // We won't encounter more pre-spilled registers, // so don't bother iterating further. break; } } argLst = info.compCompHnd->getArgNext(argLst); } // Take care of non pre-spilled stack arguments. 
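    // (This is the second of the two passes described above; restart the signature
    // iterator at the first argument.)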
    argLst = info.compMethodInfo->args.args;
    for (unsigned i = 0, stkLclNum = lclNum; i < argSigLen; ++i, ++stkLclNum)
    {
        if (!lvaIsPreSpilled(stkLclNum, preSpillMask))
        {
            const unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
            argOffs                = lvaAssignVirtualFrameOffsetToArg(stkLclNum, argSize, argOffs);
            argLcls++;
        }
        argLst = info.compCompHnd->getArgNext(argLst);
    }

    lclNum += argLcls;
#else // !TARGET_ARM
    for (unsigned i = 0; i < argSigLen; i++)
    {
        unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args);

        assert(compMacOsArm64Abi() || argumentSize % TARGET_POINTER_SIZE == 0);

        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
        argLst = info.compCompHnd->getArgNext(argLst);
    }
#endif // !TARGET_ARM

#if !USER_ARGS_COME_LAST

    //@GENERICS: extra argument for instantiation info
    if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
    {
        noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
    }

    if (info.compIsVarArgs)
    {
        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
    }

#endif // USER_ARGS_COME_LAST
}

#ifdef UNIX_AMD64_ABI
//
//  lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
//  individual argument, and return the offset for the next argument.
//  Note: This method only calculates the initial offset of the stack passed/spilled arguments
//  (if any - the RA might decide to spill (home on the stack) register passed arguments, if rarely used.)
//  The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
//  ret address slot, stack frame padding, alloca instructions, etc.
//  Note: This is the implementation for UNIX_AMD64 System V platforms.
//
int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
                                               unsigned argSize,
                                               int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
    noway_assert(lclNum < info.compArgsCount);
    noway_assert(argSize);

    if (info.compArgOrder == Target::ARG_ORDER_L2R)
    {
        argOffs -= argSize;
    }

    unsigned fieldVarNum = BAD_VAR_NUM;

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegArg)
    {
        // Argument is passed in a register, don't count it
        // when updating the current offset on the stack.

        if (varDsc->lvOnFrame)
        {
            // The offset for args needs to be set only for the stack homed arguments for System V.
            varDsc->SetStackOffset(argOffs);
        }
        else
        {
            varDsc->SetStackOffset(0);
        }
    }
    else
    {
        // For Windows AMD64 there are 4 slots for the register passed arguments on the top of the caller's stack.
        // This is where they are always homed. So, they can be accessed with positive offset.
        // On System V platforms, if the RA decides to home a register passed arg on the stack, it creates a stack
        // location on the callee stack (like any other local var.) In such a case, the register passed, stack homed
        // arguments are accessed using negative offsets and the stack passed arguments are accessed using positive
        // offset (from the caller's stack.)
        // For System V platforms if there is no frame pointer the caller stack parameter offset should include the
        // callee allocated space. If frame register is used, the callee allocated space should not be included for
        // accessing the caller stack parameters.
        // The last two requirements are met in the lvaFixVirtualFrameOffsets
        // method, which fixes the offsets based on frame pointer existence, existence of alloca instructions, ret
        // address pushed, etc.
        varDsc->SetStackOffset(*callerArgOffset);

        // Structs passed on stack could be of size less than TARGET_POINTER_SIZE.
        // Make sure they get at least TARGET_POINTER_SIZE on the stack - this is required for alignment.
        if (argSize > TARGET_POINTER_SIZE)
        {
            *callerArgOffset += (int)roundUp(argSize, TARGET_POINTER_SIZE);
        }
        else
        {
            *callerArgOffset += TARGET_POINTER_SIZE;
        }
    }

    // For struct promoted parameters we need to set the offsets for the field lclVars.
    //
    // For a promoted struct we also assign the struct fields stack offset
    if (varDsc->lvPromotedStruct())
    {
        unsigned firstFieldNum = varDsc->lvFieldLclStart;
        int      offset        = varDsc->GetStackOffset();
        for (unsigned i = 0; i < varDsc->lvFieldCnt; i++)
        {
            LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i);
            fieldVarDsc->SetStackOffset(offset + fieldVarDsc->lvFldOffset);
        }
    }

    if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
    {
        argOffs += argSize;
    }

    return argOffs;
}

#else // !UNIX_AMD64_ABI

//
//  lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
//  individual argument, and return the offset for the next argument.
//  Note: This method only calculates the initial offset of the stack passed/spilled arguments
//  (if any - the RA might decide to spill (home on the stack) register passed arguments, if rarely used.)
//  The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
//  ret address slot, stack frame padding, alloca instructions, etc.
//  Note: This is the implementation for all platforms except the UNIX_AMD64 OSs (System V 64 bit.)
int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
                                               unsigned argSize,
                                               int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
    noway_assert(lclNum < info.compArgsCount);
    noway_assert(argSize);

    if (info.compArgOrder == Target::ARG_ORDER_L2R)
    {
        argOffs -= argSize;
    }

    unsigned fieldVarNum = BAD_VAR_NUM;

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegArg)
    {
        /* Argument is passed in a register, don't count it
         * when updating the current offset on the stack */
        CLANG_FORMAT_COMMENT_ANCHOR;

#if !defined(TARGET_ARMARCH)
#if DEBUG
        // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize
        // Also investigate why we are incrementing argOffs for X86 as this seems incorrect
        //
        // noway_assert(argSize == TARGET_POINTER_SIZE);
#endif // DEBUG
#endif

#if defined(TARGET_X86)
        argOffs += TARGET_POINTER_SIZE;
#elif defined(TARGET_AMD64)
        // Register arguments on AMD64 also take stack space (in the backing store).
        varDsc->SetStackOffset(argOffs);
        argOffs += TARGET_POINTER_SIZE;
#elif defined(TARGET_ARM64)
        // Register arguments on ARM64 only take stack space when they have a frame home,
        // unless on Windows and in a vararg method.
        if (compFeatureArgSplit() && this->info.compIsVarArgs)
        {
            if (varDsc->lvType == TYP_STRUCT && varDsc->GetOtherArgReg() >= MAX_REG_ARG &&
                varDsc->GetOtherArgReg() != REG_NA)
            {
                // This is a split struct. It will account for an extra (8 bytes)
                // of alignment.
varDsc->SetStackOffset(varDsc->GetStackOffset() + TARGET_POINTER_SIZE); argOffs += TARGET_POINTER_SIZE; } } #elif defined(TARGET_ARM) // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, so we have to do SetStackOffset() here // regMaskTP regMask = genRegMask(varDsc->GetArgReg()); if (codeGen->regSet.rsMaskPreSpillRegArg & regMask) { // Signature: void foo(struct_8, int, struct_4) // ------- CALLER SP ------- // r3 struct_4 // r2 int - not prespilled, but added for alignment. argOffs should skip this. // r1 struct_8 // r0 struct_8 // ------------------------- // If we added alignment we need to fix argOffs for all registers above alignment. if (codeGen->regSet.rsMaskPreSpillAlign != RBM_NONE) { assert(genCountBits(codeGen->regSet.rsMaskPreSpillAlign) == 1); // Is register beyond the alignment pos? if (regMask > codeGen->regSet.rsMaskPreSpillAlign) { // Increment argOffs just once for the _first_ register after alignment pos // in the prespill mask. if (!BitsBetween(codeGen->regSet.rsMaskPreSpillRegArg, regMask, codeGen->regSet.rsMaskPreSpillAlign)) { argOffs += TARGET_POINTER_SIZE; } } } switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) { break; } FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: { // // Let's assign offsets to arg1, a double in r2. argOffs has to be 4 not 8. // // ------- CALLER SP ------- // r3 // r2 double -- argOffs = 4, but it doesn't need to be skipped, because there is no skipping. // r1 VACookie -- argOffs = 0 // ------------------------- // // Consider argOffs as if it accounts for number of prespilled registers before the current // register. In the above example, for r2, it is r1 that is prespilled, but since r1 is // accounted for by argOffs being 4, there should have been no skipping. Instead, if we didn't // assign r1 to any variable, then argOffs would still be 0 which implies it is not accounting // for r1, equivalently r1 is skipped. // // If prevRegsSize is unaccounted for by a corresponding argOffs, we must have skipped a register. int prevRegsSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegArg & (regMask - 1)) * TARGET_POINTER_SIZE; if (argOffs < prevRegsSize) { // We must align up the argOffset to a multiple of 8 to account for skipped registers. argOffs = roundUp((unsigned)argOffs, 2 * TARGET_POINTER_SIZE); } // We should've skipped only a single register. assert(argOffs == prevRegsSize); } break; default: // No alignment of argOffs required break; } varDsc->SetStackOffset(argOffs); argOffs += argSize; } #else // TARGET* #error Unsupported or unset target architecture #endif // TARGET* } else { #if defined(TARGET_ARM) // Dev11 Bug 42817: incorrect codegen for DrawFlatCheckBox causes A/V in WinForms // // Here we have method with a signature (int a1, struct a2, struct a3, int a4, int a5). // Struct parameter 'a2' is 16-bytes with no alignment requirements; // it uses r1,r2,r3 and [OutArg+0] when passed. // Struct parameter 'a3' is 16-bytes that is required to be double aligned; // the caller skips [OutArg+4] and starts the argument at [OutArg+8]. // Thus the caller generates the correct code to pass the arguments. // When generating code to receive the arguments we set codeGen->regSet.rsMaskPreSpillRegArg to [r1,r2,r3] // and spill these three registers as the first instruction in the prolog. // Then when we layout the arguments' stack offsets we have an argOffs 0 which // points at the location that we spilled r1 into the stack. 
For this first // struct we take the lvIsRegArg path above with "codeGen->regSet.rsMaskPreSpillRegArg &" matching. // Next when we calculate the argOffs for the second 16-byte struct we have an argOffs // of 16, which appears to be aligned properly so we don't skip a stack slot. // // To fix this we must recover the actual OutArg offset by subtracting off the // sizeof of the PreSpill register args. // Then we align this offset to a multiple of 8 and add back the sizeof // of the PreSpill register args. // // Dev11 Bug 71767: failure of assert(sizeofPreSpillRegArgs <= argOffs) // // We have a method with 'this' passed in r0, RetBuf arg in r1, VarArgs cookie // in r2. The first user arg is a 144 byte struct with double alignment required, // r3 is skipped, and the struct is passed on the stack. However, 'r3' is added // to the codeGen->regSet.rsMaskPreSpillRegArg mask by the VarArgs cookie code, since we need to // home all the potential varargs arguments in registers, even if we don't have // signature type information for the variadic arguments. However, due to alignment, // we have skipped a register that doesn't have a corresponding symbol. Make up // for that by increasing argOffs here. // int sizeofPreSpillRegArgs = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; if (argOffs < sizeofPreSpillRegArgs) { // This can only happen if we skipped the last register spot because current stk arg // is a struct requiring alignment or a pre-spill alignment was required because the // first reg arg needed alignment. // // Example 1: First Stk Argument requiring alignment in vararg case (same as above comment.) // Signature (int a0, int a1, int a2, struct {long} a3, ...) // // stk arg a3 --> argOffs here will be 12 (r0-r2) but pre-spill will be 16. // ---- Caller SP ---- // r3 --> Stack slot is skipped in this case. // r2 int a2 // r1 int a1 // r0 int a0 // // Example 2: First Reg Argument requiring alignment in no-vararg case. // Signature (struct {long} a0, struct {int} a1, int a2, int a3) // // stk arg --> argOffs here will be 12 {r0-r2} but pre-spill will be 16. // ---- Caller SP ---- // r3 int a2 --> pushed (not pre-spilled) for alignment of a0 by lvaInitUserArgs. // r2 struct { int } a1 // r0-r1 struct { long } a0 CLANG_FORMAT_COMMENT_ANCHOR; #ifdef PROFILING_SUPPORTED // On Arm under profiler, r0-r3 are always prespilled on stack. // It is possible to have methods that accept only HFAs as parameters e.g. Signature(struct hfa1, struct // hfa2), in which case hfa1 and hfa2 will be en-registered in co-processor registers and will have an // argument offset less than size of preSpill. // // For this reason the following conditions are asserted when not under profiler. if (!compIsProfilerHookNeeded()) #endif { bool cond = ((info.compIsVarArgs || opts.compUseSoftFP) && // Does cur stk arg require double alignment? ((varDsc->lvType == TYP_STRUCT && varDsc->lvStructDoubleAlign) || (varDsc->lvType == TYP_DOUBLE) || (varDsc->lvType == TYP_LONG))) || // Did first reg arg require alignment? 
                        (codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST));

                noway_assert(cond);
                noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of
                                                                                      // alignment
            }
            argOffs = sizeofPreSpillRegArgs;
        }

        noway_assert(argOffs >= sizeofPreSpillRegArgs);
        int argOffsWithoutPreSpillRegArgs = argOffs - sizeofPreSpillRegArgs;

        switch (varDsc->lvType)
        {
            case TYP_STRUCT:
                if (!varDsc->lvStructDoubleAlign)
                    break;

                FALLTHROUGH;

            case TYP_DOUBLE:
            case TYP_LONG:
                // We must align up the argOffset to a multiple of 8
                argOffs =
                    roundUp((unsigned)argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs;
                break;

            default:
                // No alignment of argOffs required
                break;
        }
#endif // TARGET_ARM
        const bool     isFloatHfa   = (varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT));
        const unsigned argAlignment = eeGetArgSizeAlignment(varDsc->lvType, isFloatHfa);
        if (compMacOsArm64Abi())
        {
            argOffs = roundUp(argOffs, argAlignment);
        }

        assert((argSize % argAlignment) == 0);
        assert((argOffs % argAlignment) == 0);
        varDsc->SetStackOffset(argOffs);
    }

    // For struct promoted parameters we need to set the offsets for both LclVars.
    //
    // For a dependent promoted struct we also assign the struct fields stack offset
    CLANG_FORMAT_COMMENT_ANCHOR;

#if !defined(TARGET_64BIT)
    if ((varDsc->TypeGet() == TYP_LONG) && varDsc->lvPromoted)
    {
        noway_assert(varDsc->lvFieldCnt == 2);
        fieldVarNum = varDsc->lvFieldLclStart;
        lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset());
        lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + genTypeSize(TYP_INT));
    }
    else
#endif // !defined(TARGET_64BIT)
        if (varDsc->lvPromotedStruct())
    {
        unsigned firstFieldNum = varDsc->lvFieldLclStart;
        for (unsigned i = 0; i < varDsc->lvFieldCnt; i++)
        {
            LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i);

            JITDUMP("Adjusting offset of dependent V%02u of arg V%02u: parent %u field %u net %u\n", lclNum,
                    firstFieldNum + i, varDsc->GetStackOffset(), fieldVarDsc->lvFldOffset,
                    varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset);

            fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset);
        }
    }

    if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
    {
        argOffs += argSize;
    }

    return argOffs;
}
#endif // !UNIX_AMD64_ABI

//-----------------------------------------------------------------------------
// lvaAssignVirtualFrameOffsetsToLocals: compute the virtual stack offsets for
//  all elements on the stackframe.
//
// Notes:
//  Can be called multiple times. Early calls can be used to estimate various
//  frame offsets, but details may change.
//
void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
{
    // (1) Account for things that are set up by the prolog and undone by the epilog.
    //
    int stkOffs              = 0;
    int originalFrameStkOffs = 0;
    int originalFrameSize    = 0;
    // codeGen->isFramePointerUsed is set in regalloc phase. Initialize it to a guess for pre-regalloc layout.
    if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT)
    {
        codeGen->setFramePointerUsed(codeGen->isFramePointerRequired());
    }

#ifdef TARGET_ARM64
    // Decide where to save FP and LR registers. We store FP/LR registers at the bottom of the frame if there is
    // a frame pointer used (so we get positive offsets from the frame pointer to access locals), but not if we
    // need a GS cookie AND localloc is used, since we need the GS cookie to protect the saved return value,
    // and also the saved frame pointer. See CodeGen::genPushCalleeSavedRegisters() for more details about the
    // frame types.
Since saving FP/LR at high addresses is a relatively rare case, force using it during stress. // (It should be legal to use these frame types for every frame). if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 0) { // Default configuration codeGen->SetSaveFpLrWithAllCalleeSavedRegisters((getNeedsGSSecurityCookie() && compLocallocUsed) || compStressCompile(STRESS_GENERIC_VARN, 20)); } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 1) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(false); // Disable using new frames } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 2) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(true); // Force using new frames } #endif // TARGET_ARM64 #ifdef TARGET_XARCH // On x86/amd64, the return address has already been pushed by the call instruction in the caller. stkOffs -= TARGET_POINTER_SIZE; // return address; if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs); } #endif // If we are an OSR method, we "inherit" the frame of the original method // if (opts.IsOSR()) { originalFrameSize = info.compPatchpointInfo->TotalFrameSize(); originalFrameStkOffs = stkOffs; stkOffs -= originalFrameSize; } #ifdef TARGET_XARCH // TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other // calleeregs. When you fix this, you'll also need to fix // the assert at the bottom of this method if (codeGen->doubleAlignOrFramePointerUsed()) { stkOffs -= REGSIZE_BYTES; } #endif int preSpillSize = 0; bool mustDoubleAlign = false; #ifdef TARGET_ARM mustDoubleAlign = true; preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; #else // !TARGET_ARM #if DOUBLE_ALIGN if (genDoubleAlign()) { mustDoubleAlign = true; // X86 only } #endif #endif // !TARGET_ARM #ifdef TARGET_ARM64 // If the frame pointer is used, then we'll save FP/LR at the bottom of the stack. // Otherwise, we won't store FP, and we'll store LR at the top, with the other callee-save // registers (if any). int initialStkOffs = 0; if (info.compIsVarArgs) { // For varargs we always save all of the integer register arguments // so that they are contiguous with the incoming stack arguments. initialStkOffs = MAX_REG_ARG * REGSIZE_BYTES; stkOffs -= initialStkOffs; } if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || !isFramePointerUsed()) // Note that currently we always have a frame pointer { stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; } else { // Subtract off FP and LR. assert(compCalleeRegsPushed >= 2); stkOffs -= (compCalleeRegsPushed - 2) * REGSIZE_BYTES; } #else // !TARGET_ARM64 #ifdef TARGET_ARM // On ARM32 LR is part of the pushed registers and is always stored at the // top. if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs - REGSIZE_BYTES); } #endif stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; #endif // !TARGET_ARM64 // (2) Account for the remainder of the frame // // From this point on the code must generally adjust both // stkOffs and the local frame size. The latter is done via: // // lvaIncrementFrameSize -- for space not associated with a local var // lvaAllocLocalAndSetVirtualOffset -- for space associated with a local var // // One exception to the above: OSR locals that have offsets within the Tier0 // portion of the frame. 
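    // Illustrative example (hypothetical numbers, ignoring any alignment pad): allocating a
    // 16-byte local through lvaAllocLocalAndSetVirtualOffset when stkOffs is -24 calls
    // lvaIncrementFrameSize(16) and leaves stkOffs at -40, keeping the two quantities in sync.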
// compLclFrameSize = 0; #ifdef TARGET_AMD64 // For methods with patchpoints, the Tier0 method must reserve // space for all the callee saves, as this area is shared with the // OSR method, and we have to anticipate that collectively the // Tier0 and OSR methods end up saving all callee saves. // // Currently this is x64 only. // if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints()) { const unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); const unsigned extraSlots = genCountBits(RBM_OSR_INT_CALLEE_SAVED) - regsPushed; const unsigned extraSlotSize = extraSlots * REGSIZE_BYTES; JITDUMP("\nMethod has patchpoints and has %u callee saves.\n" "Reserving %u extra slots (%u bytes) for potential OSR method callee saves\n", regsPushed, extraSlots, extraSlotSize); stkOffs -= extraSlotSize; lvaIncrementFrameSize(extraSlotSize); } // In case of Amd64 compCalleeRegsPushed does not include float regs (Xmm6-xmm15) that // need to be pushed. But Amd64 doesn't support push/pop of xmm registers. // Instead we need to allocate space for them on the stack and save them in prolog. // Therefore, we consider xmm registers being saved while computing stack offsets // but space for xmm registers is considered part of compLclFrameSize. // Notes // 1) We need to save the entire 128-bits of xmm register to stack, since amd64 // prolog unwind codes allow encoding of an instruction that stores the entire xmm reg // at an offset relative to SP // 2) We adjust frame size so that SP is aligned at 16-bytes after pushing integer registers. // This means while saving the first xmm register to its allocated stack location we might // have to skip 8-bytes. The reason for padding is to use efficient "movaps" to save/restore // xmm registers to/from stack to match Jit64 codegen. Without the aligning on 16-byte // boundary we would have to use movups when offset turns out unaligned. Movaps is more // performant than movups. const unsigned calleeFPRegsSavedSize = genCountBits(compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES; // For OSR the alignment pad computation should not take the original frame into account. // Original frame size includes the pseudo-saved RA and so is always = 8 mod 16. const int offsetForAlign = -(stkOffs + originalFrameSize); if ((calleeFPRegsSavedSize > 0) && ((offsetForAlign % XMM_REGSIZE_BYTES) != 0)) { // Take care of alignment int alignPad = (int)AlignmentPad((unsigned)offsetForAlign, XMM_REGSIZE_BYTES); assert(alignPad != 0); stkOffs -= alignPad; lvaIncrementFrameSize(alignPad); } stkOffs -= calleeFPRegsSavedSize; lvaIncrementFrameSize(calleeFPRegsSavedSize); // Quirk for VS debug-launch scenario to work if (compVSQuirkStackPaddingNeeded > 0) { #ifdef DEBUG if (verbose) { printf("\nAdding VS quirk stack padding of %d bytes between save-reg area and locals\n", compVSQuirkStackPaddingNeeded); } #endif // DEBUG stkOffs -= compVSQuirkStackPaddingNeeded; lvaIncrementFrameSize(compVSQuirkStackPaddingNeeded); } #endif // TARGET_AMD64 #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARMARCH) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including // padding (so we can avoid computing the same padding in the funclet // frame). Note that there is no special padding requirement for the PSPSym. 
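        // (Illustrative: with TARGET_POINTER_SIZE == 8 on arm64, the call below simply grows the
        // frame by 8 bytes and moves stkOffs down by 8; no extra pad is inserted for the PSPSym
        // itself.)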
        noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer
        stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs);
    }
#endif // FEATURE_EH_FUNCLETS && defined(TARGET_ARMARCH)

    if (mustDoubleAlign)
    {
        if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
        {
            // Allocate a pointer sized stack slot, since we may need to double align here
            // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
            //
            lvaIncrementFrameSize(TARGET_POINTER_SIZE);
            stkOffs -= TARGET_POINTER_SIZE;

            // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs
            // then we need to allocate a second pointer sized stack slot,
            // since we may need to double align that LclVar when we see it
            // in the loop below. We will just always do this so that the
            // offsets that we calculate for the stack frame will always
            // be greater (or equal) to what they can be in the final layout.
            //
            lvaIncrementFrameSize(TARGET_POINTER_SIZE);
            stkOffs -= TARGET_POINTER_SIZE;
        }
        else // FINAL_FRAME_LAYOUT
        {
            if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
            {
                lvaIncrementFrameSize(TARGET_POINTER_SIZE);
                stkOffs -= TARGET_POINTER_SIZE;
            }
            // We should now have a double-aligned (stkOffs+preSpillSize)
            noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
        }
    }

    if (lvaMonAcquired != BAD_VAR_NUM)
    {
        // For OSR we use the flag set up by the original method.
        //
        if (opts.IsOSR())
        {
            assert(info.compPatchpointInfo->HasMonitorAcquired());
            int originalOffset = info.compPatchpointInfo->MonitorAcquiredOffset();
            int offset         = originalFrameStkOffs + originalOffset;

            JITDUMP(
                "---OSR--- V%02u (on tier0 frame, monitor acquired) tier0 FP-rel offset %d tier0 frame offset %d new "
                "virt offset %d\n",
                lvaMonAcquired, originalOffset, originalFrameStkOffs, offset);

            lvaTable[lvaMonAcquired].SetStackOffset(offset);
        }
        else
        {
            // This var must go first, in what is called the 'frame header' for EnC so that it is
            // preserved when remapping occurs. See vm\eetwain.cpp for detailed comment specifying frame
            // layout requirements for EnC to work.
            stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaMonAcquired, lvaLclSize(lvaMonAcquired), stkOffs);
        }
    }

#ifdef JIT32_GCENCODER
    if (lvaLocAllocSPvar != BAD_VAR_NUM)
    {
        noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect
        stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaLocAllocSPvar, TARGET_POINTER_SIZE, stkOffs);
    }
#endif // JIT32_GCENCODER

    // For OSR methods, param type args are always reportable via the root method frame slot.
    // (see gcInfoBlockHdrSave) and so do not need a new slot on the frame.
    //
    // OSR methods may also be able to use the root frame's kept-alive this, if the root
    // method needed to report this.
    //
    // Inlining done under OSR may introduce new reporting, in which case the OSR frame
    // must allocate a slot.
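    // For example (hypothetical OSR case): if the tier0 frame already reported the generic
    // context, lvaCachedGenericContextArgOffs below reuses that tier0 slot; otherwise a
    // fresh TARGET_POINTER_SIZE slot is carved out of stkOffs.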
if (lvaReportParamTypeArg()) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; assert(ppInfo->HasGenericContextArgOffset()); const int originalOffset = ppInfo->GenericContextArgOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; } else { // For CORINFO_CALLCONV_PARAMTYPE (if needed) lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #ifndef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis()) { bool canUseExistingSlot = false; if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; if (ppInfo->HasKeptAliveThis()) { const int originalOffset = ppInfo->KeptAliveThisOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; canUseExistingSlot = true; } } if (!canUseExistingSlot) { // When "this" is also used as generic context arg. lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #endif #if !defined(FEATURE_EH_FUNCLETS) /* If we need space for slots for shadow SP, reserve it now */ if (ehNeedsShadowSPslots()) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect if (!lvaReportParamTypeArg()) { #ifndef JIT32_GCENCODER if (!lvaKeepAliveAndReportThis()) #endif { // In order to keep the gc info encoding smaller, the VM assumes that all methods with EH // have also saved space for a ParamTypeArg, so we need to do that here lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclSize(lvaShadowSPslotsVar), stkOffs); } #endif // !FEATURE_EH_FUNCLETS if (compGSReorderStackLayout) { assert(getNeedsGSSecurityCookie()); if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } /* If we're supposed to track lifetimes of pointer temps, we'll assign frame offsets in the following order: non-pointer local variables (also untracked pointer variables) pointer local variables pointer temps non-pointer temps */ enum Allocation { ALLOC_NON_PTRS = 0x1, // assign offsets to non-ptr ALLOC_PTRS = 0x2, // Second pass, assign offsets to tracked ptrs ALLOC_UNSAFE_BUFFERS = 0x4, ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8 }; UINT alloc_order[5]; unsigned int cur = 0; if (compGSReorderStackLayout) { noway_assert(getNeedsGSSecurityCookie()); if (codeGen->isFramePointerUsed()) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; } } bool tempsAllocated = false; if (lvaTempsHaveLargerOffsetThanVars() && !codeGen->isFramePointerUsed()) { // Because we want the temps to have a larger offset than locals // and we're not using a frame pointer, we have to place the temps // above the vars. Otherwise we place them after the vars (at the // bottom of the frame). 
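        // Sketch of the resulting layout (illustrative): temps allocated here receive offsets
        // closer to zero (higher addresses) than the locals laid out afterwards, which is
        // exactly the ordering lvaTempsHaveLargerOffsetThanVars() asks for.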
        noway_assert(!tempsAllocated);
        stkOffs        = lvaAllocateTemps(stkOffs, mustDoubleAlign);
        tempsAllocated = true;
    }

    alloc_order[cur++] = ALLOC_NON_PTRS;

    if (opts.compDbgEnC)
    {
        /* We will use just one pass, and assign offsets to all variables */
        alloc_order[cur - 1] |= ALLOC_PTRS;
        noway_assert(compGSReorderStackLayout == false);
    }
    else
    {
        alloc_order[cur++] = ALLOC_PTRS;
    }

    if (!codeGen->isFramePointerUsed() && compGSReorderStackLayout)
    {
        alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS;
        alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS;
    }

    alloc_order[cur] = 0;

    noway_assert(cur < ArrLen(alloc_order));

    // Force first pass to happen
    UINT assignMore             = 0xFFFFFFFF;
    bool have_LclVarDoubleAlign = false;

    for (cur = 0; alloc_order[cur]; cur++)
    {
        if ((assignMore & alloc_order[cur]) == 0)
        {
            continue;
        }

        assignMore = 0;

        unsigned   lclNum;
        LclVarDsc* varDsc;

        for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
        {
            /* Ignore field locals of the promotion type PROMOTION_TYPE_FIELD_DEPENDENT.
               In other words, we will not calculate the "base" address of the struct local if
               the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT.
            */
            if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
            {
                continue;
            }

#if FEATURE_FIXED_OUT_ARGS
            // The scratch mem is used for the outgoing arguments, and it must be absolutely last
            if (lclNum == lvaOutgoingArgSpaceVar)
            {
                continue;
            }
#endif

            bool allocateOnFrame = varDsc->lvOnFrame;

            if (varDsc->lvRegister && (lvaDoneFrameLayout == REGALLOC_FRAME_LAYOUT) &&
                ((varDsc->TypeGet() != TYP_LONG) || (varDsc->GetOtherReg() != REG_STK)))
            {
                allocateOnFrame = false;
            }

            // For OSR args and locals, we use the slots on the original frame.
            //
            // Note we must do this even for "non frame" locals, as we sometimes
            // will refer to their memory homes.
            if (lvaIsOSRLocal(lclNum))
            {
                if (varDsc->lvIsStructField)
                {
                    const unsigned parentLclNum         = varDsc->lvParentLcl;
                    const int      parentOriginalOffset = info.compPatchpointInfo->Offset(parentLclNum);
                    const int      offset = originalFrameStkOffs + parentOriginalOffset + varDsc->lvFldOffset;

                    JITDUMP("---OSR--- V%02u (promoted field of V%02u; on tier0 frame) tier0 FP-rel offset %d tier0 "
                            "frame offset %d field offset %d new virt offset "
                            "%d\n",
                            lclNum, parentLclNum, parentOriginalOffset, originalFrameStkOffs, varDsc->lvFldOffset,
                            offset);

                    lvaTable[lclNum].SetStackOffset(offset);
                }
                else
                {
                    // Add the frame-pointer-relative offset of this OSR live local in the original frame
                    // to the offset of the original frame in our new frame.
                    const int originalOffset = info.compPatchpointInfo->Offset(lclNum);
                    const int offset         = originalFrameStkOffs + originalOffset;

                    JITDUMP(
                        "---OSR--- V%02u (on tier0 frame) tier0 FP-rel offset %d tier0 frame offset %d new virt offset "
                        "%d\n",
                        lclNum, originalOffset, originalFrameStkOffs, offset);

                    lvaTable[lclNum].SetStackOffset(offset);
                }
                continue;
            }

            /* Ignore variables that are not on the stack frame */

            if (!allocateOnFrame)
            {
                /* For EnC, all variables have to be allocated space on the
                   stack, even though they may actually be enregistered. This
                   way, the frame layout can be directly inferred from the
                   locals-sig.
                 */

                if (!opts.compDbgEnC)
                {
                    continue;
                }
                else if (lclNum >= info.compLocalsCount)
                { // ignore temps for EnC
                    continue;
                }
            }
            else if (lvaGSSecurityCookie == lclNum && getNeedsGSSecurityCookie())
            {
                // Special case for OSR. If the original method had a cookie,
                // we use its slot on the original frame.
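                // (Illustrative: a tier0 frame that carries a security cookie exposes it via
                // SecurityCookieOffset(), which is folded into originalFrameStkOffs below
                // rather than allocating a fresh slot.)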
if (opts.IsOSR() && info.compPatchpointInfo->HasSecurityCookie()) { int originalOffset = info.compPatchpointInfo->SecurityCookieOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP("---OSR--- V%02u (on tier0 frame, security cookie) tier0 FP-rel offset %d tier0 frame " "offset %d new " "virt offset %d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset if ( #if defined(FEATURE_EH_FUNCLETS) lclNum == lvaPSPSym || #else lclNum == lvaShadowSPslotsVar || #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER lclNum == lvaRetAddrVar) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } if (lclNum == lvaMonAcquired) { continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaStubArgumentVar) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaInlinedPInvokeFrameVar) { noway_assert(codeGen->isFramePointerUsed()); continue; } if (varDsc->lvIsParam) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On Windows AMD64 we can use the caller-reserved stack area that is already setup assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; #else // !TARGET_AMD64 // A register argument that is not enregistered ends up as // a local variable which will need stack frame space. // if (!varDsc->lvIsRegArg) { continue; } #ifdef TARGET_ARM64 if (info.compIsVarArgs && varDsc->GetArgReg() != theFixedRetBuffArgNum()) { // Stack offset to varargs (parameters) should point to home area which will be preallocated. const unsigned regArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg()); varDsc->SetStackOffset(-initialStkOffs + regArgNum * REGSIZE_BYTES); continue; } #endif #ifdef TARGET_ARM // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, thus they don't need stack frame space. // if ((codeGen->regSet.rsMaskPreSpillRegs(false) & genRegMask(varDsc->GetArgReg())) != 0) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } #endif #endif // !TARGET_AMD64 } /* Make sure the type is appropriate */ if (varDsc->lvIsUnsafeBuffer && compGSReorderStackLayout) { if (varDsc->lvIsPtr) { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS_WITH_PTRS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS_WITH_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS; continue; } } } else if (varTypeIsGC(varDsc->TypeGet()) && varDsc->lvTracked) { if ((alloc_order[cur] & ALLOC_PTRS) == 0) { assignMore |= ALLOC_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_NON_PTRS) == 0) { assignMore |= ALLOC_NON_PTRS; continue; } } /* Need to align the offset? 
             */
            if (mustDoubleAlign && (varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86
#ifdef TARGET_ARM
                                    || varDsc->lvType == TYP_LONG // Align longs for ARM
#endif
#ifndef TARGET_64BIT
                                    || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true
#endif                                                             // !TARGET_64BIT
                                    ))
            {
                noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0);

                if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) && !have_LclVarDoubleAlign)
                {
                    // If this is the first TYP_LONG, TYP_DOUBLE or double aligned struct
                    // that we have seen in this loop, then we allocate a pointer sized
                    // stack slot since we may need to double align this LclVar
                    // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
                    //
                    lvaIncrementFrameSize(TARGET_POINTER_SIZE);
                    stkOffs -= TARGET_POINTER_SIZE;
                }
                else
                {
                    if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
                    {
                        lvaIncrementFrameSize(TARGET_POINTER_SIZE);
                        stkOffs -= TARGET_POINTER_SIZE;
                    }
                    // We should now have a double-aligned (stkOffs+preSpillSize)
                    noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
                }

                // Remember that we had to double align a LclVar
                have_LclVarDoubleAlign = true;
            }

            // Reserve the stack space for this variable
            stkOffs = lvaAllocLocalAndSetVirtualOffset(lclNum, lvaLclSize(lclNum), stkOffs);
#ifdef TARGET_ARMARCH
            // If we have an incoming register argument that has a struct promoted field
            // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar
            //
            if (varDsc->lvIsRegArg && varDsc->lvPromotedStruct())
            {
                unsigned firstFieldNum = varDsc->lvFieldLclStart;
                for (unsigned i = 0; i < varDsc->lvFieldCnt; i++)
                {
                    LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i);
                    fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset);
                }
            }
#ifdef TARGET_ARM
            // If we have an incoming register argument that has a promoted long
            // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar
            //
            else if (varDsc->lvIsRegArg && varDsc->lvPromoted)
            {
                assert(varTypeIsLong(varDsc) && (varDsc->lvFieldCnt == 2));

                unsigned fieldVarNum = varDsc->lvFieldLclStart;
                lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset());
                lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + 4);
            }
#endif // TARGET_ARM
#endif // TARGET_ARMARCH
        }
    }

    if (getNeedsGSSecurityCookie() && !compGSReorderStackLayout)
    {
        if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie())
        {
            // LOCALLOC used, but we have no unsafe buffer. Allocate the cookie last, close to the localloc buffer.
            stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs);
        }
    }

    if (tempsAllocated == false)
    {
        /*-------------------------------------------------------------------------
         *
         * Now the temps
         *
         *-------------------------------------------------------------------------
         */
        stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign);
    }

    /*-------------------------------------------------------------------------
     *
     * Now do some final stuff
     *
     *-------------------------------------------------------------------------
     */

    // lvaInlinedPInvokeFrameVar and lvaStubArgumentVar need to be assigned last
    // Important: The stack walker depends on lvaStubArgumentVar immediately
    // following lvaInlinedPInvokeFrameVar in the frame.
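    // Sketch (illustrative): lvaStubArgumentVar is allocated first and so lands at the
    // higher address, with lvaInlinedPInvokeFrameVar placed immediately below it.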
    if (lvaStubArgumentVar != BAD_VAR_NUM)
    {
#ifdef JIT32_GCENCODER
        noway_assert(codeGen->isFramePointerUsed());
#endif
        stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaStubArgumentVar, lvaLclSize(lvaStubArgumentVar), stkOffs);
    }

    if (lvaInlinedPInvokeFrameVar != BAD_VAR_NUM)
    {
        noway_assert(codeGen->isFramePointerUsed());
        stkOffs =
            lvaAllocLocalAndSetVirtualOffset(lvaInlinedPInvokeFrameVar, lvaLclSize(lvaInlinedPInvokeFrameVar), stkOffs);
    }

    if (mustDoubleAlign)
    {
        if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
        {
            // Allocate a pointer sized stack slot, since we may need to double align here
            // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
            //
            lvaIncrementFrameSize(TARGET_POINTER_SIZE);
            stkOffs -= TARGET_POINTER_SIZE;

            if (have_LclVarDoubleAlign)
            {
                // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs
                // then we need to allocate a second pointer sized stack slot,
                // since we may need to double align the last LclVar that we saw
                // in the loop above. We do this so that the offsets that we
                // calculate for the stack frame are always greater than they will
                // be in the final layout.
                //
                lvaIncrementFrameSize(TARGET_POINTER_SIZE);
                stkOffs -= TARGET_POINTER_SIZE;
            }
        }
        else // FINAL_FRAME_LAYOUT
        {
            if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
            {
                lvaIncrementFrameSize(TARGET_POINTER_SIZE);
                stkOffs -= TARGET_POINTER_SIZE;
            }
            // We should now have a double-aligned (stkOffs+preSpillSize)
            noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
        }
    }

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64)
    if (lvaPSPSym != BAD_VAR_NUM)
    {
        // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument
        // space. Any padding will be higher on the stack than this
        // (including the padding added by lvaAlignFrame()).
        noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer
        stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs);
    }
#endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64)

#ifdef TARGET_ARM64
    if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() &&
        isFramePointerUsed()) // Note that currently we always have a frame pointer
    {
        // Create space for saving FP and LR.
        stkOffs -= 2 * REGSIZE_BYTES;
    }
#endif // TARGET_ARM64

#if FEATURE_FIXED_OUT_ARGS
    if (lvaOutgoingArgSpaceSize > 0)
    {
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V.
        noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE));
#endif
        noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0);

        // Give it a value so we can avoid asserts in CHK builds.
        // Since this will always use an SP relative offset of zero
        // at the end of lvaFixVirtualFrameOffsets, it will be set to absolute '0'
        stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaOutgoingArgSpaceVar, lvaLclSize(lvaOutgoingArgSpaceVar), stkOffs);
    }
#endif // FEATURE_FIXED_OUT_ARGS

    // compLclFrameSize equals our negated virtual stack offset minus the pushed registers and return address
    // and the pushed frame pointer register which for some strange reason isn't part of 'compCalleeRegsPushed'.
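    // Worked check (hypothetical x86-style numbers): with stkOffs == -0x28,
    // compCalleeRegsPushed == 3 and a frame pointer in use, pushedCount below is
    // 3 + 1 (EBP) + 1 (return address) == 5, so the assert requires
    // compLclFrameSize + originalFrameSize == 0x28 - 5 * 4 == 0x14.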
int pushedCount = compCalleeRegsPushed; #ifdef TARGET_ARM64 if (info.compIsVarArgs) { pushedCount += MAX_REG_ARG; } #endif #ifdef TARGET_XARCH if (codeGen->doubleAlignOrFramePointerUsed()) { pushedCount += 1; // pushed EBP (frame pointer) } pushedCount += 1; // pushed PC (return address) #endif noway_assert(compLclFrameSize + originalFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE))); } int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs) { noway_assert(lclNum != BAD_VAR_NUM); #ifdef TARGET_64BIT // Before final frame layout, assume the worst case, that every >=8 byte local will need // maximum padding to be aligned. This is because we generate code based on the stack offset // computed during tentative frame layout. These offsets cannot get bigger during final // frame layout, as that would possibly require different code generation (for example, // using a 4-byte offset instead of a 1-byte offset in an instruction). The offsets can get // smaller. It is possible there is different alignment at the point locals are allocated // between tentative and final frame layout which would introduce padding between locals // and thus increase the offset (from the stack pointer) of one of the locals. Hence the // need to assume the worst alignment before final frame layout. // We could probably improve this by sorting all the objects by alignment, // such that all 8 byte objects are together, 4 byte objects are together, etc., which // would require at most one alignment padding per group. // // TYP_SIMD structs locals have alignment preference given by getSIMDTypeAlignment() for // better performance. if ((size >= 8) && ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || ((stkOffs % 8) != 0) #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES || lclVarIsSIMDType(lclNum) #endif )) { // Note that stack offsets are negative or equal to zero assert(stkOffs <= 0); // alignment padding unsigned pad = 0; #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(lclNum) && !lvaIsImplicitByRefLocal(lclNum)) { int alignment = getSIMDTypeAlignment(lvaTable[lclNum].lvType); if (stkOffs % alignment != 0) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = alignment - 1; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = alignment + (stkOffs % alignment); // +1 to +(alignment-1) bytes } } } else #endif // FEATURE_SIMD && ALIGN_SIMD_TYPES { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = 7; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = 8 + (stkOffs % 8); // +1 to +7 bytes } } // Will the pad ever be anything except 4? Do we put smaller-than-4-sized objects on the stack? lvaIncrementFrameSize(pad); stkOffs -= pad; #ifdef DEBUG if (verbose) { printf("Pad "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x, pad=%d\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? -stkOffs : stkOffs, pad); } #endif } #endif // TARGET_64BIT /* Reserve space on the stack by bumping the frame size */ lvaIncrementFrameSize(size); stkOffs -= size; lvaTable[lclNum].SetStackOffset(stkOffs); #ifdef DEBUG if (verbose) { printf("Assign "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? 
               -stkOffs : stkOffs);
    }
#endif

    return stkOffs;
}

#ifdef TARGET_AMD64
/*****************************************************************************
 *  lvaIsCalleeSavedIntRegCountEven() :  returns true if the number of integer registers
 *  pushed onto stack is even including RBP if used as frame pointer
 *
 *  Note that this excludes return address (PC) pushed by caller.  To know whether
 *  the SP offset after pushing integer registers is aligned, we need to take
 *  negation of this routine.
 */
bool Compiler::lvaIsCalleeSavedIntRegCountEven()
{
    unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0);
    return (regsPushed % (16 / REGSIZE_BYTES)) == 0;
}
#endif // TARGET_AMD64

/*****************************************************************************
 *  lvaAlignFrame() :  After allocating everything on the frame, reserve any
 *  extra space needed to keep the frame aligned
 */
void Compiler::lvaAlignFrame()
{
#if defined(TARGET_AMD64)

    // Leaf frames do not need full alignment, but the unwind info is smaller if we
    // are at least 8 byte aligned (and we assert as much)
    if ((compLclFrameSize % 8) != 0)
    {
        lvaIncrementFrameSize(8 - (compLclFrameSize % 8));
    }
    else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
    {
        // If we are not doing final layout, we don't know the exact value of compLclFrameSize
        // and thus do not know how much we will need to add in order to be aligned.
        // We add 8 so compLclFrameSize is still a multiple of 8.
        lvaIncrementFrameSize(8);
    }
    assert((compLclFrameSize % 8) == 0);

    // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD
    // if needed, but off by 8 because of the return value.
    // And don't forget that compCalleeRegsPushed does *not* include RBP if we are
    // using it as the frame pointer.
    //
    bool regPushedCountAligned = lvaIsCalleeSavedIntRegCountEven();
    bool lclFrameSizeAligned   = (compLclFrameSize % 16) == 0;

    // If this isn't the final frame layout, assume we have to push an extra QWORD
    // Just so the offsets are true upper limits.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef UNIX_AMD64_ABI
    // The compNeedToAlignFrame flag indicates whether there is a need to align the frame.
    // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
    // FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called.
    // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of 0.
    // The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering that there
    // are calls and making sure the frame alignment logic is executed.
    bool stackNeedsAlignment = (compLclFrameSize != 0 || opts.compNeedToAlignFrame);
#else  // !UNIX_AMD64_ABI
    bool stackNeedsAlignment = compLclFrameSize != 0;
#endif // !UNIX_AMD64_ABI
    if ((!codeGen->isFramePointerUsed() && (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)) ||
        (stackNeedsAlignment && (regPushedCountAligned == lclFrameSizeAligned)))
    {
        lvaIncrementFrameSize(REGSIZE_BYTES);
    }

#elif defined(TARGET_ARM64)

    // The stack on ARM64 must be 16 byte aligned.

    // First, align up to 8.
    if ((compLclFrameSize % 8) != 0)
    {
        lvaIncrementFrameSize(8 - (compLclFrameSize % 8));
    }
    else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
    {
        // If we are not doing final layout, we don't know the exact value of compLclFrameSize
        // and thus do not know how much we will need to add in order to be aligned.
        // We add 8 so compLclFrameSize is still a multiple of 8.
        lvaIncrementFrameSize(8);
    }
    assert((compLclFrameSize % 8) == 0);

    // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD
    // if needed.
    bool regPushedCountAligned = (compCalleeRegsPushed % (16 / REGSIZE_BYTES)) == 0;
    bool lclFrameSizeAligned   = (compLclFrameSize % 16) == 0;

    // If this isn't the final frame layout, assume we have to push an extra QWORD
    // Just so the offsets are true upper limits.
    if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || (regPushedCountAligned != lclFrameSizeAligned))
    {
        lvaIncrementFrameSize(REGSIZE_BYTES);
    }

#elif defined(TARGET_ARM)

    // Ensure that stack offsets will be double-aligned by grabbing an unused DWORD if needed.
    //
    bool lclFrameSizeAligned   = (compLclFrameSize % sizeof(double)) == 0;
    bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) %
                                  (sizeof(double) / TARGET_POINTER_SIZE)) == 0;

    if (regPushedCountAligned != lclFrameSizeAligned)
    {
        lvaIncrementFrameSize(TARGET_POINTER_SIZE);
    }

#elif defined(TARGET_X86)

#if DOUBLE_ALIGN
    if (genDoubleAlign())
    {
        // Double Frame Alignment for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals()

        if (compLclFrameSize == 0)
        {
            // This can only happen with JitStress=1 or JitDoubleAlign=2
            lvaIncrementFrameSize(TARGET_POINTER_SIZE);
        }
    }
#endif

    if (STACK_ALIGN > REGSIZE_BYTES)
    {
        if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
        {
            // If we are not doing final layout, we don't know the exact value of compLclFrameSize
            // and thus do not know how much we will need to add in order to be aligned.
            // We add the maximum pad that we could ever have (which is 12)
            lvaIncrementFrameSize(STACK_ALIGN - REGSIZE_BYTES);
        }

        // Align the stack with STACK_ALIGN value.
        int adjustFrameSize = compLclFrameSize;
#if defined(UNIX_X86_ABI)
        bool isEbpPushed = codeGen->isFramePointerUsed();
#if DOUBLE_ALIGN
        isEbpPushed |= genDoubleAlign();
#endif
        // we need to consider spilled register(s) plus return address and/or EBP
        int adjustCount = compCalleeRegsPushed + 1 + (isEbpPushed ? 1 : 0);
        adjustFrameSize += (adjustCount * REGSIZE_BYTES) % STACK_ALIGN;
#endif
        if ((adjustFrameSize % STACK_ALIGN) != 0)
        {
            lvaIncrementFrameSize(STACK_ALIGN - (adjustFrameSize % STACK_ALIGN));
        }
    }

#else
    NYI("TARGET specific lvaAlignFrame");
#endif // !TARGET_AMD64
}

/*****************************************************************************
 *  lvaAssignFrameOffsetsToPromotedStructs() :  Assign offsets to fields
 *  within a promoted struct (worker for lvaAssignFrameOffsets).
 */
void Compiler::lvaAssignFrameOffsetsToPromotedStructs()
{
    LclVarDsc* varDsc = lvaTable;
    for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++, varDsc++)
    {
        // For promoted struct fields that are params, we will
        // assign their offsets in lvaAssignVirtualFrameOffsetToArg().
        // This is not true for the System V systems since there is no
        // outgoing args space. Assign the dependently promoted fields properly.
        //
        CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86)
        // ARM: lo/hi parts of a promoted long arg need to be updated.
        //
        // For System V platforms there is no outgoing args space.
        //
        // For System V and x86, a register passed struct arg is homed on the stack in a separate local var.
        // The offset of these structs is already calculated in the lvaAssignVirtualFrameOffsetToArg method.
        // Make sure the code below is not executed for these structs and the offset is not changed.
        //
        const bool mustProcessParams = true;
#else
        // OSR must also assign offsets here.
// const bool mustProcessParams = opts.IsOSR(); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) if (varDsc->lvIsStructField && (!varDsc->lvIsParam || mustProcessParams)) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); if (promotionType == PROMOTION_TYPE_INDEPENDENT) { // The stack offset for these field locals must have been calculated // by the normal frame offset assignment. continue; } else { noway_assert(promotionType == PROMOTION_TYPE_DEPENDENT); noway_assert(varDsc->lvOnFrame); if (parentvarDsc->lvOnFrame) { JITDUMP("Adjusting offset of dependent V%02u of V%02u: parent %u field %u net %u\n", lclNum, varDsc->lvParentLcl, parentvarDsc->GetStackOffset(), varDsc->lvFldOffset, parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); varDsc->SetStackOffset(parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); } else { varDsc->lvOnFrame = false; noway_assert(varDsc->lvRefCnt() == 0); } } } } } /***************************************************************************** * lvaAllocateTemps() : Assign virtual offsets to temps (always negative). */ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) { unsigned spillTempSize = 0; if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT) { int preSpillSize = 0; #ifdef TARGET_ARM preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * TARGET_POINTER_SIZE; #endif /* Allocate temps */ assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { var_types tempType = temp->tdTempType(); unsigned size = temp->tdTempSize(); /* Figure out and record the stack offset of the temp */ /* Need to align the offset? */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0)) { // Calculate 'pad' as the number of bytes to align up 'stkOffs' to be a multiple of TARGET_POINTER_SIZE // In practice this is really just a fancy way of writing 4. (as all stack locations are at least 4-byte // aligned). Note stkOffs is always negative, so (stkOffs % TARGET_POINTER_SIZE) yields a negative // value. // int alignPad = (int)AlignmentPad((unsigned)-stkOffs, TARGET_POINTER_SIZE); spillTempSize += alignPad; lvaIncrementFrameSize(alignPad); stkOffs -= alignPad; noway_assert((stkOffs % TARGET_POINTER_SIZE) == 0); } #endif if (mustDoubleAlign && (tempType == TYP_DOUBLE)) // Align doubles for x86 and ARM { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { spillTempSize += TARGET_POINTER_SIZE; lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } spillTempSize += size; lvaIncrementFrameSize(size); stkOffs -= size; temp->tdSetTempOffs(stkOffs); } #ifdef TARGET_ARM // Only required for the ARM platform that we have an accurate estimate for the spillTempSize noway_assert(spillTempSize <= lvaGetMaxSpillTempSize()); #endif } else // We haven't run codegen, so there are no Spill temps yet! { unsigned size = lvaGetMaxSpillTempSize(); lvaIncrementFrameSize(size); stkOffs -= size; } return stkOffs; } #ifdef DEBUG /***************************************************************************** * * Dump the register a local is in right now. 
It is only the current location, since the location changes and it * is updated throughout code generation based on LSRA register assignments. */ void Compiler::lvaDumpRegLocation(unsigned lclNum) { const LclVarDsc* varDsc = lvaGetDesc(lclNum); #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_DOUBLE) { // The assigned registers are `lvRegNum:RegNext(lvRegNum)` printf("%3s:%-3s ", getRegName(varDsc->GetRegNum()), getRegName(REG_NEXT(varDsc->GetRegNum()))); } else #endif // TARGET_ARM { printf("%3s ", getRegName(varDsc->GetRegNum())); } } /***************************************************************************** * * Dump the frame location assigned to a local. * It's the home location, even though the variable doesn't always live * in its home location. */ void Compiler::lvaDumpFrameLocation(unsigned lclNum) { int offset; regNumber baseReg; #ifdef TARGET_ARM offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0, /* isFloatUsage */ false); #else bool EBPbased; offset = lvaFrameAddress(lclNum, &EBPbased); baseReg = EBPbased ? REG_FPBASE : REG_SPBASE; #endif printf("[%2s%1s%02XH] ", getRegName(baseReg), (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } /***************************************************************************** * * dump a single lvaTable entry */ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth) { LclVarDsc* varDsc = lvaGetDesc(lclNum); var_types type = varDsc->TypeGet(); if (curState == INITIAL_FRAME_LAYOUT) { printf("; "); gtDispLclVar(lclNum); printf(" %7s ", varTypeName(type)); gtDispLclVarStructType(lclNum); } else { if (varDsc->lvRefCnt() == 0) { // Print this with a special indicator that the variable is unused. Even though the // variable itself is unused, it might be a struct that is promoted, so seeing it // can be useful when looking at the promoted struct fields. It's also weird to see // missing var numbers if these aren't printed. printf(";* "); } #if FEATURE_FIXED_OUT_ARGS // Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until // after we set it to something. else if ((lclNum == lvaOutgoingArgSpaceVar) && lvaOutgoingArgSpaceSize.HasFinalValue() && (lvaOutgoingArgSpaceSize == 0)) { // Similar to above; print this anyway. printf(";# "); } #endif // FEATURE_FIXED_OUT_ARGS else { printf("; "); } gtDispLclVar(lclNum); printf("[V%02u", lclNum); if (varDsc->lvTracked) { printf(",T%02u]", varDsc->lvVarIndex); } else { printf(" ]"); } printf(" (%3u,%*s)", varDsc->lvRefCnt(), (int)refCntWtdWidth, refCntWtd2str(varDsc->lvRefCntWtd())); printf(" %7s ", varTypeName(type)); if (genTypeSize(type) == 0) { printf("(%2d) ", lvaLclSize(lclNum)); } else { printf(" -> "); } // The register or stack location field is 11 characters wide. if ((varDsc->lvRefCnt() == 0) && !varDsc->lvImplicitlyReferenced) { printf("zero-ref "); } else if (varDsc->lvRegister != 0) { // It's always a register, and always in the same register. lvaDumpRegLocation(lclNum); } else if (varDsc->lvOnFrame == 0) { printf("registers "); } else { // For RyuJIT backend, it might be in a register part of the time, but it will definitely have a stack home // location. Otherwise, it's always on the stack. 
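            // (Illustrative: a frame-homed local prints as something like "[rbp-08H]"
            // via lvaDumpFrameLocation below.)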
if (lvaDoneFrameLayout != NO_FRAME_LAYOUT) { lvaDumpFrameLocation(lclNum); } } } if (varDsc->lvIsHfa()) { printf(" HFA(%s) ", varTypeName(varDsc->GetHfaType())); } if (varDsc->lvDoNotEnregister) { printf(" do-not-enreg["); if (varDsc->IsAddressExposed()) { printf("X"); } if (varTypeIsStruct(varDsc)) { printf("S"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::VMNeedsStackAddr) { printf("V"); } if (lvaEnregEHVars && varDsc->lvLiveInOutOfHndlr) { printf("%c", varDsc->lvSingleDefDisqualifyReason); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::LocalField) { printf("F"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::BlockOp) { printf("B"); } if (varDsc->lvIsMultiRegArg) { printf("A"); } if (varDsc->lvIsMultiRegRet) { printf("R"); } #ifdef JIT32_GCENCODER if (varDsc->lvPinned) printf("P"); #endif // JIT32_GCENCODER printf("]"); } if (varDsc->lvIsMultiRegArg) { printf(" multireg-arg"); } if (varDsc->lvIsMultiRegRet) { printf(" multireg-ret"); } if (varDsc->lvMustInit) { printf(" must-init"); } if (varDsc->IsAddressExposed()) { printf(" addr-exposed"); } if (varDsc->lvHasLdAddrOp) { printf(" ld-addr-op"); } if (varDsc->lvVerTypeInfo.IsThisPtr()) { printf(" this"); } if (varDsc->lvPinned) { printf(" pinned"); } if (varDsc->lvStackByref) { printf(" stack-byref"); } if (varDsc->lvClassHnd != NO_CLASS_HANDLE) { printf(" class-hnd"); } if (varDsc->lvClassIsExact) { printf(" exact"); } if (varDsc->lvLiveInOutOfHndlr) { printf(" EH-live"); } if (varDsc->lvSpillAtSingleDef) { printf(" spill-single-def"); } else if (varDsc->lvSingleDefRegCandidate) { printf(" single-def"); } if (lvaIsOSRLocal(lclNum) && varDsc->lvOnFrame) { printf(" tier0-frame"); } #ifndef TARGET_64BIT if (varDsc->lvStructDoubleAlign) printf(" double-align"); #endif // !TARGET_64BIT if (varDsc->lvOverlappingFields) { printf(" overlapping-fields"); } if (compGSReorderStackLayout && !varDsc->lvRegister) { if (varDsc->lvIsPtr) { printf(" ptr"); } if (varDsc->lvIsUnsafeBuffer) { printf(" unsafe-buffer"); } } if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); #if !defined(TARGET_64BIT) if (varTypeIsLong(parentvarDsc)) { bool isLo = (lclNum == parentvarDsc->lvFieldLclStart); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, isLo ? "lo" : "hi", isLo ? 0 : genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) { CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->GetStructHnd(); CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, eeGetFieldName(fldHnd), varDsc->lvFldOffset); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); switch (promotionType) { case PROMOTION_TYPE_NONE: printf(" P-NONE"); break; case PROMOTION_TYPE_DEPENDENT: printf(" P-DEP"); break; case PROMOTION_TYPE_INDEPENDENT: printf(" P-INDEP"); break; } } } if (varDsc->lvReason != nullptr) { printf(" \"%s\"", varDsc->lvReason); } printf("\n"); } /***************************************************************************** * * dump the lvaTable */ void Compiler::lvaTableDump(FrameLayoutState curState) { if (curState == NO_FRAME_LAYOUT) { curState = lvaDoneFrameLayout; if (curState == NO_FRAME_LAYOUT) { // Still no layout? 
Could be a bug, but just display the initial layout curState = INITIAL_FRAME_LAYOUT; } } if (curState == INITIAL_FRAME_LAYOUT) { printf("; Initial"); } else if (curState == PRE_REGALLOC_FRAME_LAYOUT) { printf("; Pre-RegAlloc"); } else if (curState == REGALLOC_FRAME_LAYOUT) { printf("; RegAlloc"); } else if (curState == TENTATIVE_FRAME_LAYOUT) { printf("; Tentative"); } else if (curState == FINAL_FRAME_LAYOUT) { printf("; Final"); } else { printf("UNKNOWN FrameLayoutState!"); unreached(); } printf(" local variable assignments\n"); printf(";\n"); unsigned lclNum; LclVarDsc* varDsc; // Figure out some sizes, to help line things up size_t refCntWtdWidth = 6; // Use 6 as the minimum width if (curState != INITIAL_FRAME_LAYOUT) // don't need this info for INITIAL_FRAME_LAYOUT { for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { size_t width = strlen(refCntWtd2str(varDsc->lvRefCntWtd())); if (width > refCntWtdWidth) { refCntWtdWidth = width; } } } // Do the actual output for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { lvaDumpEntry(lclNum, curState, refCntWtdWidth); } //------------------------------------------------------------------------- // Display the code-gen temps assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { printf("; TEMP_%02u %26s%*s%7s -> ", -temp->tdTempNum(), " ", refCntWtdWidth, " ", varTypeName(temp->tdTempType())); int offset = temp->tdTempOffs(); printf(" [%2s%1s0x%02X]\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE, (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } if (curState >= TENTATIVE_FRAME_LAYOUT) { printf(";\n"); printf("; Lcl frame size = %d\n", compLclFrameSize); } } #endif // DEBUG /***************************************************************************** * * Conservatively estimate the layout of the stack frame. * * This function is only used before final frame layout. It conservatively estimates the * number of callee-saved registers that must be saved, then calls lvaAssignFrameOffsets(). * To do final frame layout, the callee-saved registers are known precisely, so * lvaAssignFrameOffsets() is called directly. * * Returns the (conservative, that is, overly large) estimated size of the frame, * including the callee-saved registers. This is only used by the emitter during code * generation when estimating the size of the offset of instructions accessing temps, * and only if temps have a larger offset than variables. */ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) { assert(curState < FINAL_FRAME_LAYOUT); unsigned result; /* Layout the stack frame conservatively. Assume all callee-saved registers are spilled to stack */ compCalleeRegsPushed = CNT_CALLEE_SAVED; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) compCalleeRegsPushed += CNT_CALLEE_SAVED_FLOAT; compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters #elif defined(TARGET_AMD64) if (compFloatingPointUsed) { compCalleeFPRegsSavedMask = RBM_FLT_CALLEE_SAVED; } else { compCalleeFPRegsSavedMask = RBM_NONE; } #endif #if DOUBLE_ALIGN if (genDoubleAlign()) { // X86 only - account for extra 4-byte pad that may be created by "and esp, -8" instruction compCalleeRegsPushed++; } #endif #ifdef TARGET_XARCH // Since FP/EBP is included in the SAVED_REG_MAXSZ we need to // subtract 1 register if codeGen->isFramePointerUsed() is true. 
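// Worked example (illustrative numbers only, not taken from any particular target): with // CNT_CALLEE_SAVED == 4, no floating point use, and a frame pointer in use, the adjustment below // gives compCalleeRegsPushed = 4 - 1 = 3, while the value returned is compLclFrameSize + // CALLEE_SAVED_REG_MAXSZ -- a deliberate over-estimate, so the emitter's instruction-size guesses // for temp offsets remain safe.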
if (codeGen->isFramePointerUsed()) { compCalleeRegsPushed--; } #endif lvaAssignFrameOffsets(curState); unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. See genPushCalleeSavedRegisters #endif result = compLclFrameSize + calleeSavedRegMaxSz; return result; } //------------------------------------------------------------------------ // lvaGetSPRelativeOffset: Given a variable, return the offset of that // variable in the frame from the stack pointer. This number will be positive, // since the stack pointer must be at a lower address than everything on the // stack. // // This can't be called for localloc functions, since the stack pointer // varies, and thus there is no fixed offset to a variable from the stack pointer. // // Arguments: // varNum - the variable number // // Return Value: // The offset. int Compiler::lvaGetSPRelativeOffset(unsigned varNum) { assert(!compLocallocUsed); assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); int spRelativeOffset; if (varDsc->lvFramePointerBased) { // The stack offset is relative to the frame pointer, so convert it to be // relative to the stack pointer (which makes no sense for localloc functions). spRelativeOffset = varDsc->GetStackOffset() + codeGen->genSPtoFPdelta(); } else { spRelativeOffset = varDsc->GetStackOffset(); } assert(spRelativeOffset >= 0); return spRelativeOffset; } /***************************************************************************** * * Return the caller-SP-relative stack offset of a local/parameter. * Requires the local to be on the stack and frame layout to be complete. */ int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); return lvaToCallerSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased); } //----------------------------------------------------------------------------- // lvaToCallerSPRelativeOffset: translate a frame offset into an offset from // the caller's stack pointer. // // Arguments: // offset - frame offset // isFpBased - if true, offset is from FP, otherwise offset is from SP // forRootFrame - if the current method is an OSR method, adjust the offset // to be relative to the SP for the root method, instead of being relative // to the SP for the OSR method. // // Returns: // suitable offset // int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRootFrame) const { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); if (isFpBased) { offset += codeGen->genCallerSPtoFPdelta(); } else { offset += codeGen->genCallerSPtoInitialSPdelta(); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) if (forRootFrame && opts.IsOSR()) { const PatchpointInfo* const ppInfo = info.compPatchpointInfo; #if defined(TARGET_AMD64) // The offset computed above already includes the OSR frame adjustment, plus the // pop of the "pseudo return address" from the OSR frame. // // To get to root method caller-SP, we need to subtract off the tier0 frame // size and the pushed return address and RBP for the tier0 frame (which we know is an // RBP frame). // // ppInfo's TotalFrameSize also accounts for the popped pseudo return address // between the tier0 method frame and the OSR frame.
So the net adjustment // is simply TotalFrameSize plus one register. // const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES; #elif defined(TARGET_ARM64) const int adjustment = ppInfo->TotalFrameSize(); #endif offset -= adjustment; } #else // OSR NYI for other targets. assert(!opts.IsOSR()); #endif return offset; } /***************************************************************************** * * Return the Initial-SP-relative stack offset of a local/parameter. * Requires the local to be on the stack and frame layout to be complete. */ int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); return lvaToInitialSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased); } // Given a local variable offset, and whether that offset is frame-pointer based, return its offset from Initial-SP. // This is used, for example, to figure out the offset of the frame pointer from Initial-SP. int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); #ifdef TARGET_AMD64 if (isFpBased) { // Currently, the frame starts by pushing ebp, ebp points to the saved ebp // (so we have ebp pointer chaining). Add the fixed-size frame size plus the // size of the callee-saved regs (not including ebp itself) to find Initial-SP. assert(codeGen->isFramePointerUsed()); offset += codeGen->genSPtoFPdelta(); } else { // The offset is correct already! } #else // !TARGET_AMD64 NYI("lvaToInitialSPRelativeOffset"); #endif // !TARGET_AMD64 return offset; } /*****************************************************************************/ #ifdef DEBUG /***************************************************************************** * Pick a padding size at "random" for the local. * 0 means that it should not be converted to a GT_LCL_FLD */ static unsigned LCL_FLD_PADDING(unsigned lclNum) { // Convert every 2nd variable if (lclNum % 2) { return 0; } // Pick a padding size at "random" unsigned size = lclNum % 7; return size; } /***************************************************************************** * * Callback for fgWalkAllTreesPre() * Convert as many GT_LCL_VARs as possible to GT_LCL_FLDs */ /* static */ /* The stress mode does 2 passes. In the first pass we will mark the locals where we CAN'T apply the stress mode. In the second pass we will do the appropriate morphing wherever we've not determined we can't do it.
*/ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; genTreeOps oper = tree->OperGet(); GenTree* lcl; switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: lcl = tree; break; case GT_ADDR: if (tree->AsOp()->gtOp1->gtOper != GT_LCL_VAR) { return WALK_CONTINUE; } lcl = tree->AsOp()->gtOp1; break; default: return WALK_CONTINUE; } noway_assert(lcl->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR)); Compiler* const pComp = ((lvaStressLclFldArgs*)data->pCallbackData)->m_pCompiler; const bool bFirstPass = ((lvaStressLclFldArgs*)data->pCallbackData)->m_bFirstPass; const unsigned lclNum = lcl->AsLclVarCommon()->GetLclNum(); var_types type = lcl->TypeGet(); LclVarDsc* const varDsc = pComp->lvaGetDesc(lclNum); if (varDsc->lvNoLclFldStress) { // Already determined we can't do anything for this var return WALK_SKIP_SUBTREES; } if (bFirstPass) { // Ignore arguments and temps if (varDsc->lvIsParam || lclNum >= pComp->info.compLocalsCount) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Ignore OSR locals; if in memory, they will live on the // Tier0 frame and so can't have their storage adjusted. // if (pComp->lvaIsOSRLocal(lclNum)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Likewise for Tier0 methods with patchpoints -- // if we modify them we'll misreport their locations in the patchpoint info. // if (pComp->doesMethodHavePatchpoints() || pComp->doesMethodHavePartialCompilationPatchpoints()) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Fix for lcl_fld stress mode if (varDsc->lvKeepType) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Can't have GC ptrs in TYP_BLK. if (!varTypeIsArithmetic(type)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // The noway_assert in the second pass below requires that these types match, or we have a TYP_BLK // if ((varDsc->lvType != lcl->gtType) && (varDsc->lvType != TYP_BLK)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Weed out "small" types like TYP_BYTE as we don't mark the GT_LCL_VAR // node with the accurate small type. If we bash lvaTable[].lvType, // then there will be no indication that it was ever a small type.
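// For example (an illustrative case): a TYP_BYTE local is normalized on load, so its GT_LCL_VAR uses // appear as TYP_INT; genTypeSize(TYP_BYTE) == 1 differs from genTypeSize(genActualType(TYP_BYTE)) == 4, // so the check below excludes such locals from the stress conversion.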
var_types varType = varDsc->TypeGet(); if (varType != TYP_BLK && genTypeSize(varType) != genTypeSize(genActualType(varType))) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Offset some of the local variables by a "random" non-zero amount unsigned padding = LCL_FLD_PADDING(lclNum); if (padding == 0) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } } else { // Do the morphing noway_assert((varDsc->lvType == lcl->gtType) || (varDsc->lvType == TYP_BLK)); var_types varType = varDsc->TypeGet(); // Calculate padding unsigned padding = LCL_FLD_PADDING(lclNum); #ifdef TARGET_ARMARCH // We need to support alignment requirements to access memory on ARM ARCH unsigned alignment = 1; pComp->codeGen->InferOpSizeAlign(lcl, &alignment); alignment = roundUp(alignment, TARGET_POINTER_SIZE); padding = roundUp(padding, alignment); #endif // TARGET_ARMARCH // Change the variable to a TYP_BLK if (varType != TYP_BLK) { varDsc->lvExactSize = roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE); varDsc->lvType = TYP_BLK; pComp->lvaSetVarAddrExposed(lclNum DEBUGARG(AddressExposedReason::STRESS_LCL_FLD)); } tree->gtFlags |= GTF_GLOB_REF; /* Now morph the tree appropriately */ if (oper == GT_LCL_VAR) { /* Change lclVar(lclNum) to lclFld(lclNum,padding) */ tree->ChangeOper(GT_LCL_FLD); tree->AsLclFld()->SetLclOffs(padding); } else if (oper == GT_LCL_VAR_ADDR) { tree->ChangeOper(GT_LCL_FLD_ADDR); tree->AsLclFld()->SetLclOffs(padding); } else { /* Change addr(lclVar) to addr(lclVar)+padding */ noway_assert(oper == GT_ADDR); GenTree* paddingTree = pComp->gtNewIconNode(padding); GenTree* newAddr = pComp->gtNewOperNode(GT_ADD, tree->gtType, tree, paddingTree); *pTree = newAddr; lcl->gtType = TYP_BLK; } } return WALK_SKIP_SUBTREES; } /*****************************************************************************/ void Compiler::lvaStressLclFld() { if (!compStressCompile(STRESS_LCL_FLDS, 5)) { return; } lvaStressLclFldArgs Args; Args.m_pCompiler = this; Args.m_bFirstPass = true; // Do First pass fgWalkAllTreesPre(lvaStressLclFldCB, &Args); // Second pass Args.m_bFirstPass = false; fgWalkAllTreesPre(lvaStressLclFldCB, &Args); } #endif // DEBUG /***************************************************************************** * * A little routine that displays a local variable bitset. * 'set' is the mask of variables that have to be displayed * 'allVars' is the complete set of interesting variables (blank space is * inserted if its corresponding bit is not in 'set'). */ #ifdef DEBUG void Compiler::lvaDispVarSet(VARSET_VALARG_TP set) { VARSET_TP allVars(VarSetOps::MakeEmpty(this)); lvaDispVarSet(set, allVars); } void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars) { printf("{"); bool needSpace = false; for (unsigned index = 0; index < lvaTrackedCount; index++) { if (VarSetOps::IsMember(this, set, index)) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching variable */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { if ((varDsc->lvVarIndex == index) && varDsc->lvTracked) { break; } } if (needSpace) { printf(" "); } else { needSpace = true; } printf("V%02u", lclNum); } else if (VarSetOps::IsMember(this, allVars, index)) { if (needSpace) { printf(" "); } else { needSpace = true; } printf(" "); } } printf("}"); } #endif // DEBUG
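// Example output of lvaDispVarSet above (illustrative): for set = {V01, V04} and allVars = {V01, V02, V04}, // it prints "{V01     V04}" -- the blank run marks V02, which is in allVars but not in 'set'.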
1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/jit/morph.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Morph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Convert the given node into a call to the specified helper passing // the given argument list. // // Tries to fold constants and also adds an edge for overflow exception // returns the morphed tree GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper) { GenTree* result; /* If the operand is a constant, we'll try to fold it */ if (oper->OperIsConst()) { GenTree* oldTree = tree; tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...) if (tree != oldTree) { return fgMorphTree(tree); } else if (tree->OperIsConst()) { return fgMorphConst(tree); } // assert that oper is unchanged and that it is still a GT_CAST node noway_assert(tree->AsCast()->CastOp() == oper); noway_assert(tree->gtOper == GT_CAST); } result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper)); assert(result == tree); return result; } /***************************************************************************** * * Convert the given node into a call to the specified helper passing * the given argument list. */ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs) { // The helper call ought to be semantically equivalent to the original node, so preserve its VN. tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN); GenTreeCall* call = tree->AsCall(); call->gtCallType = CT_HELPER; call->gtReturnType = tree->TypeGet(); call->gtCallMethHnd = eeFindHelper(helper); call->gtCallThisArg = nullptr; call->gtCallArgs = args; call->gtCallLateArgs = nullptr; call->fgArgInfo = nullptr; call->gtRetClsHnd = nullptr; call->gtCallMoreFlags = GTF_CALL_M_EMPTY; call->gtInlineCandidateInfo = nullptr; call->gtControlExpr = nullptr; #ifdef UNIX_X86_ABI call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI #if DEBUG // Helper calls are never candidates. call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; call->callSig = nullptr; #endif // DEBUG #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif #if FEATURE_MULTIREG_RET call->ResetReturnType(); call->ClearOtherRegs(); call->ClearOtherRegFlags(); #ifndef TARGET_64BIT if (varTypeIsLong(tree)) { call->InitializeLongReturnType(); } #endif // !TARGET_64BIT #endif // FEATURE_MULTIREG_RET if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree->gtFlags |= GTF_CALL; for (GenTreeCall::Use& use : GenTreeCall::UseList(args)) { tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } /* Perform the morphing */ if (morphArgs) { tree = fgMorphArgs(call); } return tree; } //------------------------------------------------------------------------ // fgMorphExpandCast: Performs the pre-order (required) morphing for a cast. // // Performs a rich variety of pre-order transformations (and some optimizations). // // Notably: // 1. 
Splits long -> small type casts into long -> int -> small type // for 32 bit targets. Does the same for float/double -> small type // casts for all targets. // 2. Morphs casts not supported by the target directly into helpers. // These mostly have to do with casts from and to floating point // types, especially checked ones. Refer to the implementation for // what specific casts need to be handled - it is a complex matrix. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via // assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC // temporary. // 4. "Pushes down" truncating long -> int casts for some operations: // CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)). // The purpose of this is to allow "optNarrowTree" in the post-order // traversal to fold the tree into a TYP_INT one, which helps 32 bit // targets (and AMD64 too since 32 bit instructions are more compact). // TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64. // // Arguments: // tree - the cast tree to morph // // Return Value: // The fully morphed tree, or "nullptr" if it needs further morphing, // in which case the cast may be transformed into an unchecked one // and its operand changed (the cast "expanded" into two). // GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) { GenTree* oper = tree->CastOp(); if (fgGlobalMorph && (oper->gtOper == GT_ADDR)) { // Make sure we've checked if 'oper' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast // morphing code to see that type. fgMorphImplicitByRefArgs(oper); } var_types srcType = genActualType(oper); var_types dstType = tree->CastToType(); unsigned dstSize = genTypeSize(dstType); // See if the cast has to be done in two steps. R -> I if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { if (srcType == TYP_FLOAT #if defined(TARGET_ARM64) // Arm64: src = float, dst is overflow conversion. // This goes through helper and hence src needs to be converted to double. && tree->gtOverflow() #elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. // This goes through helper and hence src needs to be converted to double. && (tree->gtOverflow() || (dstType == TYP_ULONG)) #elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } // Do we need to do it in two steps R -> I -> smallType? if (dstSize < genTypeSize(TYP_INT)) { oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->AsCast()->CastOp() = oper; // We must not mistreat the original cast, which was from a floating point type, // as from an unsigned type, since we now have a TYP_INT node for the source and // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT). assert(!tree->IsUnsigned()); } else { if (!tree->gtOverflow()) { #ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly.
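// (For instance, double -> uint lowers to a single fcvtzu instruction on ARM64 -- an illustrative // example, not an exhaustive list -- so no helper call is needed on this path.)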
return nullptr; #else switch (dstType) { case TYP_INT: return nullptr; case TYP_UINT: #if defined(TARGET_ARM) || defined(TARGET_AMD64) return nullptr; #else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); #endif // TARGET_X86 case TYP_LONG: #ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long return nullptr; #else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); #endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: unreached(); } #endif // TARGET_ARM64 } else { switch (dstType) { case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); default: unreached(); } } } } #ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) { oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->AsCast()->CastOp() = oper; } #endif //! TARGET_64BIT #ifdef TARGET_ARMARCH // AArch, unlike x86/amd64, has instructions that can cast directly from // all integers (except for longs on AArch32 of course) to floats. // Because there is no IL instruction conv.r4.un, uint/ulong -> float // casts are always imported as CAST(float <- CAST(double <- uint/ulong)). // We can eliminate the redundant intermediate cast as an optimization. else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; return fgMorphTree(oper); } #endif // TARGET_ARMARCH #ifdef TARGET_ARM // converts long/ulong --> float/double casts into helper calls. else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) { if (dstType == TYP_FLOAT) { // there is only a double helper, so we // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } if (tree->gtFlags & GTF_UNSIGNED) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } #endif // TARGET_ARM #ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 // b) U8 -> R8 // // The following conversions are performed as two-step operations using above. // U4 -> R4/8 = U4-> Long -> R4/8 // U8 -> R4 = U8 -> R8 -> R4 else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { if (dstType == TYP_FLOAT) { // Codegen can handle U8 -> R8 conversion. 
// U8 -> R4 = U8 -> R8 -> R4 // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->CastOp() = oper; } } #endif // TARGET_AMD64 #ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } } else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) { oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); // Since we don't have a Jit Helper that converts to a TYP_FLOAT // we just use the one that converts to a TYP_DOUBLE // and then add a cast to TYP_FLOAT // if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL)) { // Fix the return type to be TYP_DOUBLE // oper->gtType = TYP_DOUBLE; // Add a Cast to TYP_FLOAT // tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } else { return oper; } } #endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. We would like to just // change the type to int, however this gives the emitter fits because // it believes the variable is a GC variable at the beginning of the // instruction group, but is not turned non-gc by the code generator // we fix this by copying the GC pointer to a non-gc pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); // We generate an assignment to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; GenTree* asg = gtNewTempAssign(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); return fgMorphTree(oper); } // Look for narrowing casts ([u]long -> [u]int) and try to push them // down into the operand before morphing it. // // It doesn't matter if this cast is from ulong or long (i.e. if // GTF_UNSIGNED is set) because the transformation is only applied to // overflow-insensitive narrowing casts, which always silently truncate. // // Note that casts from [u]long to small integer types are handled above. if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT))) { // As a special case, look for overflow-sensitive casts of an AND // expression, and see if the second operand is a small constant. Since // the result of an AND is bound by its smaller operand, it may be // possible to prove that the cast won't overflow, which will in turn // allow the cast's operand to be transformed.
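// For example (an illustrative case): in CAST_OVF(uint <- AND(x, 0xFFFF)) the AND result is bounded // by 0xFFFF, and (0xFFFF >> 32) == 0, so the overflow flag can be cleared below and the narrowing // push-down transformation becomes applicable.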
if (tree->gtOverflow() && (oper->OperGet() == GT_AND)) { GenTree* andOp2 = oper->AsOp()->gtOp2; // Look for a constant less than 2^{32} for a cast to uint, or less // than 2^{31} for a cast to int. int maxWidth = (dstType == TYP_UINT) ? 32 : 31; if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0)) { tree->ClearOverflow(); tree->SetAllEffectsFlags(oper); } } // Only apply this transformation during global morph, // when neither the cast node nor the oper node may throw an exception // based on the upper 32 bits. // if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx()) { // For these operations the lower 32 bits of the result only depends // upon the lower 32 bits of the operands. // bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG); // For long LSH cast to int, there is a discontinuity in behavior // when the shift amount is 32 or larger. // // CAST(INT, LSH(1LL, 31)) == LSH(1, 31) // LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31) // // CAST(INT, LSH(1LL, 32)) == 0 // LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1 // // So some extra validation is needed. // if (oper->OperIs(GT_LSH)) { GenTree* shiftAmount = oper->AsOp()->gtOp2; // Expose constant value for shift, if possible, to maximize the number // of cases we can handle. shiftAmount = gtFoldExpr(shiftAmount); oper->AsOp()->gtOp2 = shiftAmount; #if DEBUG // We may remorph the shift amount tree again later, so clear any morphed flag. shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG if (shiftAmount->IsIntegralConst()) { const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue(); if ((shiftAmountValue >= 64) || (shiftAmountValue < 0)) { // Shift amount is large enough or negative so result is undefined. // Don't try to optimize. assert(!canPushCast); } else if (shiftAmountValue >= 32) { // We know that we have a narrowing cast ([u]long -> [u]int) // and that we are casting to a 32-bit value, which will result in zero. // // Check to see if we have any side-effects that we must keep // if ((tree->gtFlags & GTF_ALL_EFFECT) == 0) { // Result of the shift is zero. DEBUG_DESTROY_NODE(tree); GenTree* zero = gtNewZeroConNode(TYP_INT); return fgMorphTree(zero); } else // We do have a side-effect { // We could create a GT_COMMA node here to keep the side-effect and return a zero // Instead we just don't try to optimize this case. canPushCast = false; } } else { // Shift amount is positive and small enough that we can push the cast through. canPushCast = true; } } else { // Shift amount is unknown. We can't optimize this case. assert(!canPushCast); } } if (canPushCast) { DEBUG_DESTROY_NODE(tree); // Insert narrowing casts for op1 and op2. oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType); if (oper->AsOp()->gtOp2 != nullptr) { oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType); } // Clear the GT_MUL_64RSLT if it is set. if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT)) { oper->gtFlags &= ~GTF_MUL_64RSLT; } // The operation now produces a 32-bit result. oper->gtType = TYP_INT; // Remorph the new tree as the casts that we added may be folded away. 
return fgMorphTree(oper); } } } return nullptr; } #ifdef DEBUG const char* getNonStandardArgKindName(NonStandardArgKind kind) { switch (kind) { case NonStandardArgKind::None: return "None"; case NonStandardArgKind::PInvokeFrame: return "PInvokeFrame"; case NonStandardArgKind::PInvokeTarget: return "PInvokeTarget"; case NonStandardArgKind::PInvokeCookie: return "PInvokeCookie"; case NonStandardArgKind::WrapperDelegateCell: return "WrapperDelegateCell"; case NonStandardArgKind::ShiftLow: return "ShiftLow"; case NonStandardArgKind::ShiftHigh: return "ShiftHigh"; case NonStandardArgKind::FixedRetBuffer: return "FixedRetBuffer"; case NonStandardArgKind::VirtualStubCell: return "VirtualStubCell"; case NonStandardArgKind::R2RIndirectionCell: return "R2RIndirectionCell"; case NonStandardArgKind::ValidateIndirectCallTarget: return "ValidateIndirectCallTarget"; default: unreached(); } } void fgArgTabEntry::Dump() const { printf("fgArgTabEntry[arg %u", argNum); printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet())); printf(" %s", varTypeName(argType)); printf(" (%s)", passedByRef ? "By ref" : "By value"); if (GetRegNum() != REG_STK) { printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s"); for (unsigned i = 0; i < numRegs; i++) { printf(" %s", getRegName(regNums[i])); } } if (GetStackByteSize() > 0) { #if defined(DEBUG_ARG_SLOTS) printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset); #else printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset); #endif } printf(", byteAlignment=%u", m_byteAlignment); if (isLateArg()) { printf(", lateArgInx=%u", GetLateArgInx()); } if (IsSplit()) { printf(", isSplit"); } if (needTmp) { printf(", tmpNum=V%02u", tmpNum); } if (needPlace) { printf(", needPlace"); } if (isTmp) { printf(", isTmp"); } if (processed) { printf(", processed"); } if (IsHfaRegArg()) { printf(", isHfa(%s)", varTypeName(GetHfaType())); } if (isBackFilled) { printf(", isBackFilled"); } if (nonStandardArgKind != NonStandardArgKind::None) { printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind)); } if (isStruct) { printf(", isStruct"); } printf("]\n"); } #endif fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs) { compiler = comp; callTree = call; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = 0; #if defined(UNIX_X86_ABI) alignmentDone = false; stkSizeBytes = 0; padStkAlign = 0; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = 0; #endif argTableSize = numArgs; // the allocated table size hasRegArgs = false; hasStackArgs = false; argsComplete = false; argsSorted = false; needsTemps = false; if (argTableSize == 0) { argTable = nullptr; } else { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; } } /***************************************************************************** * * fgArgInfo Copy Constructor * * This method needs to act like a copy constructor for fgArgInfo. * The newCall needs to have its fgArgInfo initialized such that * we have newCall that is an exact copy of the oldCall. * We have to take care since the argument information * in the argTable contains pointers that must point to the * new arguments and not the old arguments. 
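 * For example (an illustration): if oldCall's second argument entry is argTable[1], the loops in the * body below re-point argTable[1]->use from the Use node in oldCall's argument list to the * corresponding Use node in newCall's list, matching entries by their old Use pointers.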
*/ fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall) { fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo; compiler = oldArgInfo->compiler; callTree = newCall; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = oldArgInfo->stkLevel; #if defined(UNIX_X86_ABI) alignmentDone = oldArgInfo->alignmentDone; stkSizeBytes = oldArgInfo->stkSizeBytes; padStkAlign = oldArgInfo->padStkAlign; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = oldArgInfo->outArgSize; #endif argTableSize = oldArgInfo->argTableSize; argsComplete = false; argTable = nullptr; assert(oldArgInfo->argsComplete); if (argTableSize > 0) { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; // Copy the old arg entries for (unsigned i = 0; i < argTableSize; i++) { argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]); } // The copied arg entries contain pointers to old uses; they need // to be updated to point to new uses. if (newCall->gtCallThisArg != nullptr) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldCall->gtCallThisArg) { argTable[i]->use = newCall->gtCallThisArg; break; } } } GenTreeCall::UseIterator newUse = newCall->Args().begin(); GenTreeCall::UseIterator newUseEnd = newCall->Args().end(); GenTreeCall::UseIterator oldUse = oldCall->Args().begin(); GenTreeCall::UseIterator oldUseEnd = oldCall->Args().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldUse.GetUse()) { argTable[i]->use = newUse.GetUse(); break; } } } newUse = newCall->LateArgs().begin(); newUseEnd = newCall->LateArgs().end(); oldUse = oldCall->LateArgs().begin(); oldUseEnd = oldCall->LateArgs().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->lateUse == oldUse.GetUse()) { argTable[i]->lateUse = newUse.GetUse(); break; } } } } argCount = oldArgInfo->argCount; DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;) nextStackByteOffset = oldArgInfo->nextStackByteOffset; hasRegArgs = oldArgInfo->hasRegArgs; hasStackArgs = oldArgInfo->hasStackArgs; argsComplete = true; argsSorted = true; } void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry) { assert(argCount < argTableSize); argTable[argCount] = curArgTabEntry; argCount++; } fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; // Any additional register numbers are set by the caller. // This is primarily because on ARM we don't yet know if it // will be split or if it is a double HFA, so the number of registers // may actually be less.
curArgTabEntry->setRegNum(0, regNum); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; curArgTabEntry->numRegs = numRegs; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->slotNum = 0; curArgTabEntry->numSlots = 0; #endif curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(0); hasRegArgs = true; if (argCount >= argTableSize) { fgArgTabEntry** oldTable = argTable; argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1]; memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*)); argTableSize++; } AddArg(curArgTabEntry); return curArgTabEntry; } #if defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr) { fgArgTabEntry* curArgTabEntry = AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg); assert(curArgTabEntry != nullptr); curArgTabEntry->isStruct = isStruct; // is this a struct arg curArgTabEntry->structIntRegs = structIntRegs; curArgTabEntry->structFloatRegs = structFloatRegs; INDEBUG(curArgTabEntry->checkIsStruct();) assert(numRegs <= 2); if (numRegs == 2) { curArgTabEntry->setRegNum(1, otherRegNum); } if (isStruct && structDescPtr != nullptr) { curArgTabEntry->structDesc.CopyFrom(*structDescPtr); } return curArgTabEntry; } #endif // defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum); curArgTabEntry->setRegNum(0, REG_STK); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->numSlots = numSlots; curArgTabEntry->slotNum = nextSlotNum; #endif curArgTabEntry->numRegs = 0; #if defined(UNIX_AMD64_ABI) curArgTabEntry->structIntRegs = 0; curArgTabEntry->structFloatRegs = 0; #endif // defined(UNIX_AMD64_ABI) curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = 
false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(nextStackByteOffset); hasStackArgs = true; AddArg(curArgTabEntry); DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) nextStackByteOffset += curArgTabEntry->GetByteSize(); return curArgTabEntry; } void fgArgInfo::RemorphReset() { DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // UpdateRegArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicating whether we are remorphing the call // // Assumptions: // This must have already been determined to be at least partially passed in registers. // void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); assert(curArgTabEntry->numRegs != 0); assert(curArgTabEntry->use->GetNode() == node); } //------------------------------------------------------------------------ // UpdateStkArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicating whether we are remorphing the call // // Assumptions: // This must have already been determined to be passed on the stack. // void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
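// In other words (illustration): a node moved to gtCallLateArgs must carry GTF_LATE_ARG, and a node // still in gtCallArgs must not; the assert below checks both directions of that invariant.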
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); noway_assert(curArgTabEntry->use != callTree->gtCallThisArg); assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit()); assert(curArgTabEntry->use->GetNode() == node); #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); assert(curArgTabEntry->slotNum == nextSlotNum); nextSlotNum += curArgTabEntry->numSlots; } #endif nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment()); assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset); nextStackByteOffset += curArgTabEntry->GetStackByteSize(); } void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots) { fgArgTabEntry* curArgTabEntry = nullptr; assert(argNum < argCount); for (unsigned inx = 0; inx < argCount; inx++) { curArgTabEntry = argTable[inx]; if (curArgTabEntry->argNum == argNum) { break; } } assert(numRegs > 0); assert(numSlots > 0); if (argsComplete) { assert(curArgTabEntry->IsSplit() == true); assert(curArgTabEntry->numRegs == numRegs); DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);) assert(hasStackArgs == true); } else { curArgTabEntry->SetSplit(true); curArgTabEntry->numRegs = numRegs; DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;) curArgTabEntry->SetByteOffset(0); hasStackArgs = true; } DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) // TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size. nextStackByteOffset += numSlots * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // EvalToTmp: Replace the node in the given fgArgTabEntry with a temp // // Arguments: // curArgTabEntry - the fgArgTabEntry for the argument // tmpNum - the varNum for the temp // newNode - the assignment of the argument value to the temp // // Notes: // Although the name of this method is EvalToTmp, it doesn't actually create // the temp or the copy. // void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode) { assert(curArgTabEntry->use != callTree->gtCallThisArg); assert(curArgTabEntry->use->GetNode() == newNode); assert(curArgTabEntry->GetNode() == newNode); curArgTabEntry->tmpNum = tmpNum; curArgTabEntry->isTmp = true; } void fgArgInfo::ArgsComplete() { bool hasStructRegArg = false; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); if (curArgTabEntry->GetRegNum() == REG_STK) { assert(hasStackArgs == true); #if !FEATURE_FIXED_OUT_ARGS // On x86 we use push instructions to pass arguments: // The non-register arguments are evaluated and pushed in order // and they are never evaluated into temps // continue; #endif } #if FEATURE_ARG_SPLIT else if (curArgTabEntry->IsSplit()) { hasStructRegArg = true; assert(hasStackArgs == true); } #endif // FEATURE_ARG_SPLIT else // we have a register argument, next we look for a struct type. { if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct)) { hasStructRegArg = true; } } /* If the argument tree contains an assignment (GTF_ASG) then the argument and every earlier argument (except constants) must be evaluated into temps since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a" -> when we see the second arg "a=5" we know the first two arguments "a, a=5" have to be evaluated into temps. For the case of an assignment, we only know that there exists some assignment someplace in the tree. We don't know what is being assigned so we are very conservative here and assume that any local variable could have been assigned. */ if (argx->gtFlags & GTF_ASG) { // If this is not the only argument, or it's a copyblk, or it already evaluates the expression to // a tmp, then we need a temp in the late arg list. if ((argCount > 1) || argx->OperIsCopyBlkOp() #ifdef FEATURE_FIXED_OUT_ARGS || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property // that we only have late non-register args when that feature is on. #endif // FEATURE_FIXED_OUT_ARGS ) { curArgTabEntry->needTmp = true; needsTemps = true; } // For all previous arguments, unless they are a simple constant // we require that they be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); if (!prevArgTabEntry->GetNode()->IsInvariant()) { prevArgTabEntry->needTmp = true; needsTemps = true; } } } bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0); #if FEATURE_FIXED_OUT_ARGS // Like calls, if this argument has a tree that will do an inline throw, // a call to a jit helper, then we need to treat it like a call (but only // if there are/were any stack args). // This means unnesting, sorting, etc. Technically this is overly // conservative, but I want to avoid as much special-case debug-only code // as possible, so leveraging the GTF_CALL flag is the easiest. // if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode && (compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT)) { for (unsigned otherInx = 0; otherInx < argCount; otherInx++) { if (otherInx == curInx) { continue; } if (argTable[otherInx]->GetRegNum() == REG_STK) { treatLikeCall = true; break; } } } #endif // FEATURE_FIXED_OUT_ARGS /* If it contains a call (GTF_CALL) then itself and everything before the call with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT has to be kept in the right order since we will move the call to the first position) For calls we don't have to be quite as conservative as we are with an assignment since the call won't be modifying any non-address taken LclVars.
*/ if (treatLikeCall) { if (argCount > 1) // If this is not the only argument { curArgTabEntry->needTmp = true; needsTemps = true; } else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL)) { // Spill all arguments that are floating point calls curArgTabEntry->needTmp = true; needsTemps = true; } // All previous arguments may need to be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); // For all previous arguments, if they have any GTF_ALL_EFFECT // we require that they be evaluated into a temp if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0) { prevArgTabEntry->needTmp = true; needsTemps = true; } #if FEATURE_FIXED_OUT_ARGS // Or, if they are stored into the FIXED_OUT_ARG area // we require that they be moved to the gtCallLateArgs // and replaced with a placeholder node else if (prevArgTabEntry->GetRegNum() == REG_STK) { prevArgTabEntry->needPlace = true; } #if FEATURE_ARG_SPLIT else if (prevArgTabEntry->IsSplit()) { prevArgTabEntry->needPlace = true; } #endif // FEATURE_ARG_SPLIT #endif } } #if FEATURE_MULTIREG_ARGS // For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST // with multiple indirections, so here we consider spilling it into a tmp LclVar. // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM bool isMultiRegArg = (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1); #else bool isMultiRegArg = (curArgTabEntry->numRegs > 1); #endif if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false)) { if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0)) { // Spill multireg struct arguments that have Assignments or Calls embedded in them curArgTabEntry->needTmp = true; needsTemps = true; } else { // We call gtPrepareCost to measure the cost of evaluating this tree compiler->gtPrepareCost(argx); if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX))) { // Spill multireg struct arguments that are expensive to evaluate twice curArgTabEntry->needTmp = true; needsTemps = true; } #if defined(FEATURE_SIMD) && defined(TARGET_ARM64) else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet())) { // SIMD types do not need the optimization below due to their sizes if (argx->OperIsSimdOrHWintrinsic() || (argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) && argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic())) { curArgTabEntry->needTmp = true; needsTemps = true; } } #endif #ifndef TARGET_ARM // TODO-Arm: This optimization is not implemented for ARM32 // so we skip this for ARM32 until it is ported to use RyuJIT backend // else if (argx->OperGet() == GT_OBJ) { GenTreeObj* argObj = argx->AsObj(); unsigned structSize = argObj->GetLayout()->GetSize(); switch (structSize) { case 3: case 5: case 6: case 7: // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes // if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar? { // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes // For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
// curArgTabEntry->needTmp = true; needsTemps = true; } break; case 11: case 13: case 14: case 15: // Spill any GT_OBJ multireg structs that are difficult to extract // // When we have a GT_OBJ of a struct with the above sizes we would need // to use 3 or 4 load instructions to load the exact size of this struct. // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp. // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing // the argument. // curArgTabEntry->needTmp = true; needsTemps = true; break; default: break; } } #endif // !TARGET_ARM } } #endif // FEATURE_MULTIREG_ARGS } // We only care because we can't spill structs and qmarks involve a lot of spilling, but // if we don't have qmarks, then it doesn't matter. // So check for Qmarks globally once here, instead of inside the loop. // const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed); #if FEATURE_FIXED_OUT_ARGS // For Arm/x64 we only care because we can't reorder a register // argument that uses GT_LCLHEAP. This is an optimization to // save a check inside the below loop. // const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed); #else const bool hasStackArgsWeCareAbout = hasStackArgs; #endif // FEATURE_FIXED_OUT_ARGS // If we have any stack args we have to force the evaluation // of any arguments passed in registers that might throw an exception // // Technically we are only required to handle the following two cases: // a GT_IND with GTF_IND_RNGCHK (only on x86) or // a GT_LCLHEAP node that allocates stuff on the stack // if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout) { for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); // Examine the register args that are currently not marked needTmp // if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK)) { if (hasStackArgsWeCareAbout) { #if !FEATURE_FIXED_OUT_ARGS // On x86 we previously recorded a stack depth of zero when // morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag // Thus we cannot reorder the argument after any stack based argument // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to // check for it explicitly.) // if (argx->gtFlags & GTF_EXCEPT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } #else // For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP // if (argx->gtFlags & GTF_EXCEPT) { assert(compiler->compLocallocUsed); // Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } #endif } if (hasStructRegArgWeCareAbout) { // Returns true if a GT_QMARK node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } } } } // When CFG is enabled and this is a delegate call or vtable call we must // compute the call target before all late args. However this will // effectively null-check 'this', which should happen only after all // arguments are evaluated. Thus we must evaluate all args with side // effects to a temp.
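// A rough sketch of the ordering this enforces (illustration only, names made up): // tmp0 = this; tmp1 = <arg with side effects>; // early args forced to temps // target = <validated vtable/delegate target>; // implicitly null-checks tmp0 // call target(tmp0, tmp1, <invariant args>); // late args // Without the temps, an argument's side effects could observably execute // after the implicit null check of 'this' instead of before it.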
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke())) { // Always evaluate 'this' to temp. argTable[0]->needTmp = true; needsTemps = true; for (unsigned curInx = 1; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; GenTree* arg = curArgTabEntry->GetNode(); if ((arg->gtFlags & GTF_ALL_EFFECT) != 0) { curArgTabEntry->needTmp = true; needsTemps = true; } } } argsComplete = true; } void fgArgInfo::SortArgs() { assert(argsComplete == true); #ifdef DEBUG if (compiler->verbose) { printf("\nSorting the arguments:\n"); } #endif /* Shuffle the arguments around before we build the gtCallLateArgs list. The idea is to move all "simple" arguments like constants and local vars to the end of the table, and move the complex arguments towards the beginning of the table. This will help prevent registers from being spilled by allowing us to evaluate the more complex arguments before the simpler arguments. The argTable ends up looking like: +------------------------------------+ <--- argTable[argCount - 1] | constants | +------------------------------------+ | local var / local field | +------------------------------------+ | remaining arguments sorted by cost | +------------------------------------+ | temps (argTable[].needTmp = true) | +------------------------------------+ | args with calls (GTF_CALL) | +------------------------------------+ <--- argTable[0] */ /* Set the beginning and end for the new argument table */ unsigned curInx; int regCount = 0; unsigned begTab = 0; unsigned endTab = argCount - 1; unsigned argsRemaining = argCount; // First take care of arguments that are constants. // [We use a backward iterator pattern] // curInx = argCount; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { regCount++; } assert(curArgTabEntry->lateUse == nullptr); // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put constants at the end of the table // if (argx->gtOper == GT_CNS_INT) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > 0); if (argsRemaining > 0) { // Next take care of arguments that are calls. // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put calls at the beginning of the table // if (argx->gtFlags & GTF_CALL) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of arguments that are temps. // These temps come before the arguments that are // ordinary local vars or local fields // since this will give them a better chance to become // enregistered into their actual argument register.
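// For example (illustrative): if the unprocessed range holds { tmpA (needTmp), // x (GT_LCL_VAR), tmpB (needTmp) }, this pass moves tmpA and tmpB to the front // of the range; the following pass then pushes x towards the end, next to the // constants, matching the diagram above.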
// [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { if (curArgTabEntry->needTmp) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of local var and local field arguments. // These are moved towards the end of the argument evaluation. // [We use a backward iterator pattern] // curInx = endTab + 1; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD)) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > begTab); } // Finally, take care of all the remaining arguments. // Note that we fill in one arg at a time using a while loop. bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop while (argsRemaining > 0) { /* Find the most expensive arg remaining and evaluate it next */ fgArgTabEntry* expensiveArgTabEntry = nullptr; unsigned expensiveArg = UINT_MAX; unsigned expensiveArgCost = 0; // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // We should have already handled these kinds of args assert(argx->gtOper != GT_LCL_VAR); assert(argx->gtOper != GT_LCL_FLD); assert(argx->gtOper != GT_CNS_INT); // This arg should either have no persistent side effects or be the last one in our table // assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1))); if (argsRemaining == 1) { // This is the last arg to place expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; assert(begTab == endTab); break; } else { if (!costsPrepared) { /* We call gtPrepareCost to measure the cost of evaluating this tree */ compiler->gtPrepareCost(argx); } if (argx->GetCostEx() > expensiveArgCost) { // Remember this arg as the most expensive one that we have yet seen expensiveArgCost = argx->GetCostEx(); expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; } } } } noway_assert(expensiveArg != UINT_MAX); // put the most expensive arg towards the beginning of the table expensiveArgTabEntry->processed = true; // place expensiveArgTabEntry at the begTab position by performing a swap // if (expensiveArg != begTab) { argTable[expensiveArg] = argTable[begTab]; argTable[begTab] = expensiveArgTabEntry; } begTab++; argsRemaining--; costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop } // The table should now be completely filled and thus begTab should now be adjacent to endTab // and argsRemaining should be zero assert(begTab == (endTab + 1)); assert(argsRemaining == 0); argsSorted = true; } #ifdef DEBUG void fgArgInfo::Dump(Compiler* compiler) const { for (unsigned curInx = 0; curInx
< ArgCount(); curInx++) { fgArgTabEntry* curArgEntry = ArgTable()[curInx]; curArgEntry->Dump(); } } #endif //------------------------------------------------------------------------------ // fgMakeTmpArgNode : This function creates a tmp var only if needed. // We need this to be done in order to enforce ordering // of the evaluation of arguments. // // Arguments: // curArgTabEntry // // Return Value: // the newly created temp var tree. GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) { unsigned tmpVarNum = curArgTabEntry->tmpNum; LclVarDsc* varDsc = lvaGetDesc(tmpVarNum); assert(varDsc->lvIsTemp); var_types type = varDsc->TypeGet(); // Create a copy of the temp to go into the late argument list GenTree* arg = gtNewLclvNode(tmpVarNum, type); GenTree* addrNode = nullptr; if (varTypeIsStruct(type)) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. // Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type. bool passedAsPrimitive = false; if (curArgTabEntry->TryPassAsPrimitive()) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg()); if (structBaseType != TYP_UNKNOWN) { passedAsPrimitive = true; #if defined(UNIX_AMD64_ABI) // TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry, // and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take // a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again. // if (genIsValidFloatReg(curArgTabEntry->GetRegNum())) { if (structBaseType == TYP_INT) { structBaseType = TYP_FLOAT; } else { assert(structBaseType == TYP_LONG); structBaseType = TYP_DOUBLE; } } #endif type = structBaseType; } } // If it is passed in registers, don't get the address of the var. Make it a // field instead. It will be loaded in registers with putarg_reg tree in lower. if (passedAsPrimitive) { arg->ChangeOper(GT_LCL_FLD); arg->gtType = type; lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); } else { var_types addrType = TYP_BYREF; arg = gtNewOperNode(GT_ADDR, addrType, arg); lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); addrNode = arg; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { // We will create a GT_OBJ for the argument below. // This will be passed by value in two registers. assert(addrNode != nullptr); // Create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); } #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); #endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } #else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg); // Get a new Obj node temp to use it as a call argument. // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. 
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode); #endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) } // (varTypeIsStruct(type)) if (addrNode != nullptr) { assert(addrNode->gtOper == GT_ADDR); // the child of a GT_ADDR is required to have this flag set addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE; } return arg; } //------------------------------------------------------------------------------ // EvalArgsToTemps : Create temp assignments and populate the LateArgs list. void fgArgInfo::EvalArgsToTemps() { assert(argsSorted); unsigned regArgInx = 0; // Now go through the argument table and perform the necessary evaluation into temps GenTreeCall::Use* tmpRegArgNext = nullptr; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry->lateUse == nullptr); GenTree* argx = curArgTabEntry->GetNode(); GenTree* setupArg = nullptr; GenTree* defArg; #if !FEATURE_FIXED_OUT_ARGS // Only ever set for FEATURE_FIXED_OUT_ARGS assert(curArgTabEntry->needPlace == false); // On x86 and other archs that use push instructions to pass arguments: // Only the register arguments need to be replaced with placeholder nodes. // Stacked arguments are evaluated and pushed (or stored into the stack) in order. // if (curArgTabEntry->GetRegNum() == REG_STK) continue; #endif if (curArgTabEntry->needTmp) { if (curArgTabEntry->isTmp) { // Create a copy of the temp to go into the late argument list defArg = compiler->fgMakeTmpArgNode(curArgTabEntry); // mark the original node as a late argument argx->gtFlags |= GTF_LATE_ARG; } else { // Create a temp assignment for the argument // Put the temp in the gtCallLateArgs list CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { printf("Argument with 'side effect'...\n"); compiler->gtDispTree(argx); } #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) noway_assert(argx->gtType != TYP_STRUCT); #endif unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect")); if (argx->gtOper == GT_MKREFANY) { // For GT_MKREFANY, typically the actual struct copying does // not have any side-effects and can be delayed. So instead // of using a temp for the whole struct, we can just use a temp // for the operand that has a side-effect GenTree* operand; if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp1; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp2; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } } if (setupArg != nullptr) { // Now keep the mkrefany for the late argument list defArg = argx; // Clear the side-effect flags because now both op1 and op2 have no side-effects defArg->gtFlags &= ~GTF_ALL_EFFECT; } else { setupArg = compiler->gtNewTempAssign(tmpVarNum, argx); LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); var_types scalarType = TYP_UNKNOWN; if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); #if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM and unix amd64 architectures. // CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum); unsigned structSize = varDsc->lvExactSize; scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } #endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => // 8) if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType)) { // Create a GT_LCL_FLD using the wider type to go to the late argument list defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0); } else { // Create a copy of the temp to go to the late argument list defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType); } curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; #ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). // Too bad we're not that smart for these intermediate temps... if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1)) { regNumber argReg = curArgTabEntry->GetRegNum(); regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum()); for (unsigned i = 1; i < curArgTabEntry->numRegs; i++) { argReg = genRegArgNext(argReg); allUsedRegs |= genRegMask(argReg); } } #endif // TARGET_ARM } /* mark the assignment as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { printf("\n Evaluate to a temp:\n"); compiler->gtDispTree(setupArg); } #endif } } else // curArgTabEntry->needTmp == false { // On x86 - // Only register args are replaced with placeholder nodes // and the stack based arguments are evaluated and pushed in order. // // On Arm/x64 - When needTmp is false and needPlace is false, // the non-register arguments are evaluated and stored in order. // When needPlace is true we have a nested call that comes after // this argument so we have to replace it in the gtCallArgs list // (the initial argument evaluation list) with a placeholder. // if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false)) { continue; } /* No temp needed - move the whole node to the gtCallLateArgs list */ /* The argument is deferred and put in the late argument list */ defArg = argx; // Create a placeholder node to put in its place in gtCallLateArgs. // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT); #else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { clsHnd = compiler->gtGetStructHandleIfPresent(defArg); noway_assert(clsHnd != NO_CLASS_HANDLE); } #endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); /* mark the placeholder node as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { if (curArgTabEntry->GetRegNum() == REG_STK) { printf("Deferred stack argument :\n"); } else { printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum())); } compiler->gtDispTree(argx); printf("Replaced with placeholder node:\n"); compiler->gtDispTree(setupArg); } #endif } if (setupArg != nullptr) { noway_assert(curArgTabEntry->use->GetNode() == argx); curArgTabEntry->use->SetNode(setupArg); } /* deferred arg goes into the late argument list */ if (tmpRegArgNext == nullptr) { tmpRegArgNext = compiler->gtNewCallArgs(defArg); callTree->AsCall()->gtCallLateArgs = tmpRegArgNext; } else { noway_assert(tmpRegArgNext->GetNode() != nullptr); tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg)); tmpRegArgNext = tmpRegArgNext->GetNext(); } curArgTabEntry->lateUse = tmpRegArgNext; curArgTabEntry->SetLateArgInx(regArgInx++); } #ifdef DEBUG if (compiler->verbose) { printf("\nShuffled argument table: "); for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { printf("%s ", getRegName(curArgTabEntry->GetRegNum())); } } printf("\n"); } #endif } //------------------------------------------------------------------------------ // fgMakeMultiUse : If the node is an unaliased local or constant clone it, // otherwise insert a comma form temp // // Arguments: // pOp - a pointer to the child node we will be replacing with the comma expression that // evaluates *pOp to a temp and returns the result // // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // // Notes: // Caller must ensure that if the node is an unaliased local, the second use this // creates will be evaluated before the local can be reassigned. // // Can be safely called in morph preorder, before GTF_GLOB_REF is reliable. // GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; if (tree->IsInvariant()) { return gtClone(tree); } else if (tree->IsLocal()) { // Can't rely on GTF_GLOB_REF here. // if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed()) { return gtClone(tree); } } return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, // and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // structType - value type handle if the temp created is of TYP_STRUCT.
// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) { GenTree* subTree = *ppTree; unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable")); if (varTypeIsStruct(subTree)) { assert(structType != nullptr); lvaSetStruct(lclNum, structType, false); } // If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for // setting the type of the lcl vars created. GenTree* asg = gtNewTempAssign(lclNum, subTree); GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); *ppTree = comma; return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); } //------------------------------------------------------------------------ // fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg // // Arguments: // callNode - the call for which we are generating the fgArgInfo // // Return Value: // None // // Notes: // This method is idempotent in that it checks whether the fgArgInfo has already been // constructed, and if so, just returns. // This method only computes the arg table and arg entries for the call (the fgArgInfo), // and makes no modification of the args themselves. // // The IR for the call args can change for calls with non-standard arguments: some non-standard // arguments add new call argument IR nodes. // void Compiler::fgInitArgInfo(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; unsigned argIndex = 0; unsigned intArgRegNum = 0; unsigned fltArgRegNum = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); #ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; #endif // TARGET_ARM #if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number #endif if (call->fgArgInfo != nullptr) { // We've already initialized and set the fgArgInfo. return; } JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non-Windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing // arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the // non-standard arguments into the argument list, below. class NonStandardArgs { struct NonStandardArg { GenTree* node; // The tree node representing this non-standard argument.
// Note that this must be updated if the tree node changes due to morphing! regNumber reg; // The register to be assigned to this non-standard argument. NonStandardArgKind kind; // The kind of the non-standard arg }; ArrayStack<NonStandardArg> args; public: NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments { } //----------------------------------------------------------------------------- // Add: add a non-standard argument to the table of non-standard arguments // // Arguments: // node - a GenTree node that has a non-standard argument. // reg - the register to assign to this node. // // Return Value: // None. // void Add(GenTree* node, regNumber reg, NonStandardArgKind kind) { NonStandardArg nsa = {node, reg, kind}; args.Push(nsa); } //----------------------------------------------------------------------------- // Find: Look for a GenTree* in the set of non-standard args. // // Arguments: // node - a GenTree node to look for // // Return Value: // The index of the non-standard argument (a non-negative, unique, stable number). // If the node is not a non-standard argument, return -1. // int Find(GenTree* node) { for (int i = 0; i < args.Height(); i++) { if (node == args.Top(i).node) { return i; } } return -1; } //----------------------------------------------------------------------------- // Find: Look for a GenTree node in the non-standard arguments set. If found, // set the register to use for the node. // // Arguments: // node - a GenTree node to look for // pReg - an OUT argument. *pReg is set to the non-standard register to use if // 'node' is found in the non-standard argument set. // pKind - an OUT argument. *pKind is set to the kind of the non-standard arg. // // Return Value: // 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set. // 'false' otherwise (in this case, *pReg and *pKind are unmodified). // bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind) { for (int i = 0; i < args.Height(); i++) { NonStandardArg& nsa = args.TopRef(i); if (node == nsa.node) { *pReg = nsa.reg; *pKind = nsa.kind; return true; } } return false; } //----------------------------------------------------------------------------- // Replace: Replace the non-standard argument node at a given index. This is done when // the original node was replaced via morphing, but we need to continue to assign a // particular non-standard arg to it. // // Arguments: // index - the index of the non-standard arg. It must exist. // node - the new GenTree node. // // Return Value: // None. // void Replace(int index, GenTree* node) { args.TopRef(index).node = node; } } nonStandardArgs(getAllocator(CMK_ArrayStack)); // Count of args. On first morph, this is counted before we've filled in the arg table. // On remorph, we grab it from the arg table. unsigned numArgs = 0; // First we need to count the args if (call->gtCallThisArg != nullptr) { numArgs++; } for (GenTreeCall::Use& use : call->Args()) { numArgs++; } // Insert or mark non-standard args. These are either outside the normal calling convention, or // in argument registers that don't follow the normal progression of argument registers in the calling // convention (such as for the ARM64 fixed return buffer argument x8). // // *********** NOTE ************* // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments // in the implementation of fast tail call.
// *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers have a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame); } #endif // defined(TARGET_X86) || defined(TARGET_ARM) #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4 // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub) // to achieve its goal for delegate VSD calls. See COMDelegate::NeedsWrapperDelegate() in the VM for details. else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { GenTree* arg = call->gtCallThisArg->GetNode(); if (arg->OperIsLocal()) { arg = gtClone(arg, true); } else { GenTree* tmp = fgInsertCommaFormTemp(&arg); call->gtCallThisArg->SetNode(arg); call->gtFlags |= GTF_ASG; arg = tmp; } noway_assert(arg != nullptr); GenTree* newArg = new (this, GT_ADDR) GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell); // Append newArg as the last arg GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(newArg); numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell); } #endif // defined(TARGET_ARM) #if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow); args = args->GetNext(); GenTree* arg2 = args->GetNode(); assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh); } #else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. // If we have a Fixed Return Buffer argument register then we set up a non-standard argument for it. // // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention. // That convention doesn't use the fixed return buffer register. // CLANG_FORMAT_COMMENT_ANCHOR; if (call->HasFixedRetBufArg()) { args = call->gtCallArgs; assert(args != nullptr); argx = call->gtCallArgs->GetNode(); // We don't increment numArgs here, since we already counted this argument above.
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer); } // We are allowed to have a Fixed Return Buffer argument combined // with any of the remaining non-standard arguments // CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub()) { if (!call->IsTailCallViaJitHelper()) { GenTree* stubAddrArg = fgGetStubAddrArg(call); // And push the stub address onto the list of arguments call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell); } else { // If it is a VSD call getting dispatched via tail call helper, // fgMorphTailCallViaJitHelper() would materialize stub addr as an additional // parameter added to the original arg list and hence no need to // add as a non-standard arg. } } else #endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); GenTree* arg = call->gtCallCookie; noway_assert(arg != nullptr); call->gtCallCookie = nullptr; // All architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie); numArgs++; // put destination into R10/EAX arg = gtClone(call->gtCallAddr, true); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget); // finally change this call to a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } #if defined(FEATURE_READYTORUN) // For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. // For x64/x86 we use return address to get the indirection cell by disassembling the call site. // That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch. // Note that we call this before we know if something will be a fast tailcall or not. // That's ok; after making something a tailcall, we will invalidate this information // and reconstruct it if necessary. The tailcalling decision does not change since // this is a non-standard arg in a register. bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke(); #if defined(TARGET_XARCH) needsIndirectionCell &= call->IsFastTailCall(); #endif if (needsIndirectionCell) { assert(call->gtEntryPoint.addr != nullptr); size_t addrValue = (size_t)call->gtEntryPoint.addr; GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR); #ifdef DEBUG indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); #ifdef TARGET_ARM // Issue #xxxx : Don't attempt to CSE this constant on ARM32 // // This constant has specific register requirements, and LSRA doesn't currently correctly // handle them when the value is in a CSE'd local. indirectCellAddress->SetDoNotCSE(); #endif // TARGET_ARM // Push the stub address onto the list of arguments. 
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); numArgs++; nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(), NonStandardArgKind::R2RIndirectionCell); } #endif if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { assert(call->gtCallArgs != nullptr); GenTreeCall::Use* args = call->gtCallArgs; GenTree* tar = args->GetNode(); nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget); } // Allocate the fgArgInfo for the call node; // call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs); // Add the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); assert(argIndex == 0); assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT); assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL)); const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum); const unsigned numRegs = 1; const unsigned byteSize = TARGET_POINTER_SIZE; const unsigned byteAlignment = TARGET_POINTER_SIZE; const bool isStruct = false; const bool isFloatHfa = false; // This is a register argument - put it in the table. call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); intArgRegNum++; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument fltArgRegNum++; #endif // WINDOWS_AMD64_ABI argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } #ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) { noway_assert(intArgRegNum < MAX_REG_ARG); // No more register arguments for varargs (CALL_POP_ARGS) maxRegArgs = intArgRegNum; // Add in the ret buff arg if (callHasRetBuffArg) maxRegArgs++; } #endif // UNIX_X86_ABI if (call->IsUnmanaged()) { noway_assert(intArgRegNum == 0); if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL || call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF || call->gtCallArgs->GetNode()->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice) maxRegArgs = 1; } else { maxRegArgs = 0; } #ifdef UNIX_X86_ABI // Add in the ret buff arg if (callHasRetBuffArg && call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments. maxRegArgs++; #endif } #endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially // section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can // appear in a lower-numbered register than floating point argument N. That is, argument // register allocation is not strictly increasing. To support this, we need to keep track of unused // floating-point argument registers that we can back-fill. 
We only support 4-byte float and // 8-byte double types, and one to four element HFAs composed of these types. With this, we will // only back-fill single registers, since there is no way with these types to create // an alignment hole greater than one register. However, there can be up to 3 back-fill slots // available (with 16 FP argument registers). Consider this code: // // struct HFA { float x, y, z; }; // a three element HFA // void bar(float a1, // passed in f0 // double a2, // passed in f2/f3; skip f1 for alignment // HFA a3, // passed in f4/f5/f6 // double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot // HFA a5, // passed in f10/f11/f12 // double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill // // slots // float a7, // passed in f1 (back-filled) // float a8, // passed in f7 (back-filled) // float a9, // passed in f13 (back-filled) // float a10) // passed on the stack in [OutArg+0] // // Note that if we ever support FP types with larger alignment requirements, then there could // be more than single register back-fills. // // Once we assign a floating-point argument to the stack, all subsequent floating-point arguments // must go on the stack. See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling // continues only so long as no VFP CPRC has been allocated to a slot on the stack." // We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack // and prevent any additional floating-point arguments from going in registers. bool anyFloatStackArgs = false; #endif // TARGET_ARM #ifdef UNIX_AMD64_ABI SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif // UNIX_AMD64_ABI #if defined(DEBUG) // Check that we have valid information about call's argument types. // For example: // load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte)); // load int; call(byte) -> CALL(PUTARG_TYPE int (IND int)); // etc. if (call->callSig != nullptr) { CORINFO_SIG_INFO* sig = call->callSig; const unsigned sigArgsCount = sig->numArgs; GenTreeCall::Use* nodeArgs = call->gtCallArgs; // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie, // etc. unsigned nodeArgsCount = 0; call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult { nodeArgsCount++; return GenTree::VisitResult::Continue; }); if (call->gtCallThisArg != nullptr) { // Handle the most common argument not included in `sig->numArgs`, // so the following check works on more methods.
nodeArgsCount--; } assert(nodeArgsCount >= sigArgsCount); if ((nodeArgsCount == sigArgsCount) && ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) { CORINFO_ARG_LIST_HANDLE sigArg = sig->args; for (unsigned i = 0; i < sig->numArgs; ++i) { CORINFO_CLASS_HANDLE argClass; const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); const var_types sigType = JITtype2varType(corType); assert(nodeArgs != nullptr); const GenTree* nodeArg = nodeArgs->GetNode(); assert(nodeArg != nullptr); const var_types nodeType = nodeArg->TypeGet(); assert((nodeType == sigType) || varTypeIsStruct(sigType) || genTypeSize(nodeType) == genTypeSize(sigType)); sigArg = info.compCompHnd->getArgNext(sigArg); nodeArgs = nodeArgs->GetNext(); } assert(nodeArgs == nullptr); } } #endif // DEBUG for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { argx = args->GetNode()->gtSkipPutArgType(); // Change the node to TYP_I_IMPL so we don't report GC info // NOTE: We deferred this from the importer because of the inliner. if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // We should never have any ArgPlaceHolder nodes at this point. assert(!argx->IsArgPlaceHolderNode()); // Setup any HFA information about 'argx' bool isHfaArg = false; var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; unsigned size = 0; unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { // Make sure for vararg methods isHfaArg is not true. isHfaArg = callIsVararg ? false : isHfaArg; } #endif // defined(TARGET_ARM64) if (isHfaArg) { isHfaArg = true; hfaSlots = GetHfaCount(argx); // If we have a HFA struct it's possible we transition from a method that originally // only had integer types to now start having FP types. We have to communicate this // through this flag since LSRA later on will use this flag to determine whether // or not to track the FP register set. // compFloatingPointUsed = true; } } const bool isFloatHfa = (hfaType == TYP_FLOAT); #ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); // We don't use the "size" return value from InferOpSizeAlign(). 
codeGen->InferOpSizeAlign(argx, &argAlignBytes); argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE); if (argAlignBytes == 2 * TARGET_POINTER_SIZE) { if (passUsingFloatRegs) { if (fltArgRegNum % 2 == 1) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); fltArgRegNum++; } } else if (passUsingIntRegs) { if (intArgRegNum % 2 == 1) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); intArgRegNum++; } } #if defined(DEBUG) if (argSlots % 2 == 1) { argSlots++; } #endif } #elif defined(TARGET_ARM64) assert(!callIsVararg || !isHfaArg); passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)); #elif defined(TARGET_AMD64) passUsingFloatRegs = varTypeIsFloating(argx); #elif defined(TARGET_X86) passUsingFloatRegs = false; #else #error Unsupported or unset target architecture #endif // TARGET* bool isBackFilled = false; unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use var_types structBaseType = TYP_STRUCT; unsigned structSize = 0; bool passStructByRef = false; bool isStructArg; GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */); // // Figure out the size of the argument. This is either in number of registers, or number of // TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and // the stack. // isStructArg = varTypeIsStruct(argx); CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE; if (isStructArg) { objClass = gtGetStructHandle(argx); if (argx->TypeGet() == TYP_STRUCT) { // For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY switch (actualArg->OperGet()) { case GT_OBJ: structSize = actualArg->AsObj()->GetLayout()->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); break; case GT_LCL_VAR: structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize; break; case GT_MKREFANY: structSize = info.compCompHnd->getClassSize(objClass); break; default: BADCODE("illegal argument tree in fgInitArgInfo"); break; } } else { structSize = genTypeSize(argx); assert(structSize == info.compCompHnd->getClassSize(objClass)); } } #if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI if (!isStructArg) { size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } else { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc); } #else // !UNIX_AMD64_ABI size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot' if (!isStructArg) { byteSize = genTypeSize(argx); } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if (isStructArg) { if (isHfaArg) { // HFA structs are passed by value in multiple registers. // The "size" in registers may differ from the size in pointer-sized units. CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx); size = GetHfaCount(structHnd); byteSize = info.compCompHnd->getClassSize(structHnd); } else { // Structs are either passed in 1 or 2 (64-bit) slots. // Structs that are the size of 2 pointers are passed by value in multiple registers, // if sufficient registers are available. // Structs that are larger than 2 pointers (except for HFAs) are passed by // reference (to a copy) size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; if (size > 2) { size = 1; } } // Note that there are some additional rules for multireg structs.
// (i.e. they cannot be split between registers and the stack) } else { size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } #elif defined(TARGET_ARM) || defined(TARGET_X86) if (isStructArg) { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; } else { // The typical case. // Long/double type argument(s) will be modified as needed in Lowering. size = genTypeStSz(argx->gtType); byteSize = genTypeSize(argx); } #else #error Unsupported or unset target architecture #endif // TARGET_XXX if (isStructArg) { assert(argx == args->GetNode()); assert(structSize != 0); structPassingKind howToPassStruct; structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize); passStructByRef = (howToPassStruct == SPK_ByReference); if (howToPassStruct == SPK_ByReference) { byteSize = TARGET_POINTER_SIZE; } else { byteSize = structSize; } if (howToPassStruct == SPK_PrimitiveType) { #ifdef TARGET_ARM // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct, // or for a struct of two floats. This causes the struct to be address-taken. if (structBaseType == TYP_DOUBLE) { size = 2; } else #endif // TARGET_ARM { size = 1; } } else if (passStructByRef) { size = 1; } } const var_types argType = args->GetNode()->TypeGet(); if (args->GetNode()->OperIs(GT_PUTARG_TYPE)) { byteSize = genTypeSize(argType); } // The 'size' value must have been set by now. (The original value of zero is an invalid value.) assert(size != 0); assert(byteSize != 0); if (compMacOsArm64Abi()) { // Arm64 Apple has a special ABI for passing small-size arguments on the stack: // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc. // It means passing 8 1-byte arguments on the stack can take as little as 8 bytes. argAlignBytes = eeGetArgAlignment(argType, isFloatHfa); } // // Figure out if the argument will be passed in a register. // bool isRegArg = false; NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None; regNumber nonStdRegNum = REG_NA; if (isRegParamType(genActualType(argx->TypeGet())) #ifdef UNIX_AMD64_ABI && (!isStructArg || structDesc.passedInRegisters) #elif defined(TARGET_X86) || (isStructArg && isTrivialPointerSizedStruct(objClass)) #endif ) { #ifdef TARGET_ARM if (passUsingFloatRegs) { // First, see if it can be back-filled if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet) (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot? (size == 1)) // The size to back-fill is one float register { // Back-fill the register. isBackFilled = true; regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask); fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask)); assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG); } // Does the entire float, double, or HFA fit in the FP arg registers? // Check if the last register needed is still in the argument register range. isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG; if (!isRegArg) { anyFloatStackArgs = true; } } else { isRegArg = intArgRegNum < MAX_REG_ARG; } #elif defined(TARGET_ARM64) if (passUsingFloatRegs) { // Check if the last register needed is still in the fp argument register range.
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG; // Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers? if (isHfaArg && !isRegArg) { // recompute the 'size' so that it represents the number of stack slots rather than the number of // registers // unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE); size = roundupSize / TARGET_POINTER_SIZE; // We also must update fltArgRegNum so that we no longer try to // allocate any new floating point registers for args // This prevents us from backfilling a subsequent arg into d7 // fltArgRegNum = MAX_FLOAT_REG_ARG; } } else { // Check if the last register needed is still in the int argument register range. isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; // Did we run out of registers when we had a 16-byte struct (size == 2)? // (i.e. we only have one register remaining but we needed two registers to pass this arg) // This prevents us from backfilling a subsequent arg into x7 // if (!isRegArg && (size > 1)) { // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. if (TargetOS::IsWindows && callIsVararg) { // Override the decision and force a split. isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args // intArgRegNum = maxRegArgs; } } } #else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) // Here a struct can be passed in registers following the classifications of its members and size. // Now make sure there are actually enough registers to do so. if (isStructArg) { unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { structIntRegs++; } else if (structDesc.IsSseSlot(i)) { structFloatRegs++; } } isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) && ((intArgRegNum + structIntRegs) <= MAX_REG_ARG); } else { if (passUsingFloatRegs) { isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG; } else { isRegArg = intArgRegNum < MAX_REG_ARG; } } #else // !defined(UNIX_AMD64_ABI) isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; #endif // !defined(UNIX_AMD64_ABI) #endif // TARGET_ARM } else { isRegArg = false; } // If there are nonstandard args (outside the calling convention) they were inserted above // and noted in a table so we can recognize them here and build their argInfo. // // They should not affect the placement of any other args or stack space required. // Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls. bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind); if (isNonStandard) { isRegArg = (nonStdRegNum != REG_STK); } else if (call->IsTailCallViaJitHelper()) { // We have already (before calling fgMorphArgs()) appended the 4 special args // required by the x86 tailcall helper. These args are required to go on the // stack. Force them to the stack here. assert(numArgs >= 4); if (argIndex >= numArgs - 4) { isRegArg = false; } } // Now we know if the argument goes in registers or not and how big it is. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all // subsequent HFA/float/double arguments go on the stack.
if (!isRegArg && passUsingFloatRegs) { for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); } } // If we think we're going to split a struct between integer registers and the stack, check to // see if we've already assigned a floating-point arg to the stack. if (isRegArg && // We decided above to use a register for the argument !passUsingFloatRegs && // We're using integer registers (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack anyFloatStackArgs) // We've already used the stack for a floating-point argument { isRegArg = false; // Change our mind; don't pass this struct partially in registers // Skip the rest of the integer argument registers for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } #endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; if (isRegArg) { regNumber nextRegNum = REG_STK; #if defined(UNIX_AMD64_ABI) regNumber nextOtherRegNum = REG_STK; unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; #endif // defined(UNIX_AMD64_ABI) if (isNonStandard) { nextRegNum = nonStdRegNum; } #if defined(UNIX_AMD64_ABI) else if (isStructArg && structDesc.passedInRegisters) { // It is a struct passed in registers. Assign the next available register. assert((structDesc.eightByteCount <= 2) && "Too many eightbytes."); regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum}; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { *nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs); ++structIntRegs; } else if (structDesc.IsSseSlot(i)) { *nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs); ++structFloatRegs; } } } #endif // defined(UNIX_AMD64_ABI) else { // fill in or update the argInfo table nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum); } #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif #endif // This is a register argument - put it in the table newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum) UNIX_AMD64_ABI_ONLY_ARG(structIntRegs) UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs) UNIX_AMD64_ABI_ONLY_ARG(&structDesc)); newArgEntry->SetIsBackFilled(isBackFilled); // Set up the next intArgRegNum and fltArgRegNum values. 
if (!isBackFilled) { #if defined(UNIX_AMD64_ABI) if (isStructArg) { // For this case, we've already set the regNums in the argTabEntry intArgRegNum += structIntRegs; fltArgRegNum += structFloatRegs; } else #endif // defined(UNIX_AMD64_ABI) { if (!isNonStandard) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || (argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG))); unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum; assert((unsigned char)numRegsPartial == numRegsPartial); call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial); } #endif // FEATURE_ARG_SPLIT if (passUsingFloatRegs) { fltArgRegNum += size; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else { // Increment intArgRegNum by 'size' registers intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } } } } else // We have an argument that is not passed in a register { // This is a stack argument - put it in the table newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg); #ifdef UNIX_AMD64_ABI // TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs). if (structDesc.passedInRegisters) { newArgEntry->structDesc.CopyFrom(structDesc); } #endif } newArgEntry->nonStandardArgKind = nonStandardArgKind; if (GlobalJitOptions::compFeatureHfa) { if (isHfaArg) { newArgEntry->SetHfaType(hfaType, hfaSlots); } } newArgEntry->SetMultiRegNums(); noway_assert(newArgEntry != nullptr); if (newArgEntry->isStruct) { newArgEntry->passedByRef = passStructByRef; newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType; } else { newArgEntry->argType = argx->TypeGet(); } DEBUG_ARG_SLOTS_ONLY(argSlots += size;) } // end foreach argument loop #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif } //------------------------------------------------------------------------ // fgMorphArgs: Walk and transform (morph) the arguments of a call // // Arguments: // callNode - the call for which we are doing the argument morphing // // Return Value: // Like most morph methods, this method returns the morphed node, // though in this case there are currently no scenarios where the // node itself is re-created. // // Notes: // This calls fgInitArgInfo to create the 'fgArgInfo' for the call. // If it has already been created, that method will simply return. // // This method changes the state of the call node. It uses the existence // of gtCallLateArgs (the late arguments list) to determine if it has // already done the first round of morphing. // // The first time it is called (i.e. during global morphing), this method // computes the "late arguments". 
This is when it determines which arguments // need to be evaluated to temps prior to the main argument setup, and which // can be directly evaluated into the argument location. It also creates a // second argument list (gtCallLateArgs) that does the final placement of the // arguments, e.g. into registers or onto the stack. // // The "non-late arguments", aka the gtCallArgs, are doing the in-order // evaluation of the arguments that might have side-effects, such as embedded // assignments, calls or possible throws. In these cases, the argument and any earlier // arguments must be evaluated to temps. // // On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS), // if we have any nested calls, we need to defer the copying of the argument // into the fixed argument area until after the call. If the argument did not // otherwise need to be computed into a temp, it is moved to gtCallLateArgs and // replaced in the "early" arg list (gtCallArgs) with a placeholder node. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; GenTreeFlags flagsSummary = GTF_EMPTY; unsigned argIndex = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool reMorphing = call->AreArgsComplete(); // Set up the fgArgInfo. fgInitArgInfo(call); JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper)); // If we are remorphing, process the late arguments (which were determined by a previous caller). if (reMorphing) { for (GenTreeCall::Use& use : call->LateArgs()) { use.SetNode(fgMorphTree(use.GetNode())); flagsSummary |= use.GetNode()->gtFlags; } assert(call->fgArgInfo != nullptr); } call->fgArgInfo->RemorphReset(); // First we morph the argument subtrees ('this' pointer, arguments, etc.). // During the first call to fgMorphArgs we also record the // information about late arguments we have in 'fgArgInfo'. // This information is used later to construct the gtCallLateArgs. // Process the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing); argx = fgMorphTree(argx); call->gtCallThisArg->SetNode(argx); // This is a register argument - possibly update it in the table. call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable()) { if (!argx->OperIsLocal()) { thisArgEntry->needTmp = true; call->fgArgInfo->SetNeedsTemps(); } } assert(argIndex == 0); argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } // Note that this name is a bit of a misnomer - it indicates that there are struct args // that occupy more than a single slot that are passed by value (not necessarily in regs). bool hasMultiregStructArgs = false; for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { GenTree** parentArgx = &args->NodeRef(); fgArgTabEntry* argEntry = call->fgArgInfo->GetArgEntry(argIndex, reMorphing); // Morph the arg node, and update the parent and argEntry pointers.
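// Note that fgMorphTree may return a node different from its input (e.g. when
// folding occurs), so the parent's slot must be re-written with the result.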
argx = *parentArgx; argx = fgMorphTree(argx); *parentArgx = argx; assert(argx == args->GetNode()); DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { if (argSlots % 2 == 1) { argSlots++; } } } #endif // DEBUG if (argEntry->isNonStandard() && argEntry->isPassedInRegisters()) { // We need to update the node field for this nonStandard arg here // as it may have been changed by the call to fgMorphTree. call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; continue; } DEBUG_ARG_SLOTS_ASSERT(size != 0); DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();) if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // Get information about this argument. var_types hfaType = argEntry->GetHfaType(); bool isHfaArg = (hfaType != TYP_UNDEF); bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters(); unsigned structSize = 0; // Struct arguments may be morphed into a node that is not a struct type. // In such case the fgArgTabEntry keeps track of whether the original node (before morphing) // was a struct and the struct classification. bool isStructArg = argEntry->isStruct; GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE)) { CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj); unsigned originalSize; if (argObj->TypeGet() == TYP_STRUCT) { if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else { // We have a BADCODE assert for this in fgInitArgInfo. assert(argObj->OperIs(GT_LCL_VAR)); originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize; } } else { originalSize = genTypeSize(argx); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE); var_types structBaseType = argEntry->argType; // First, handle the case where the argument is passed by reference. if (argEntry->passedByRef) { DEBUG_ARG_SLOTS_ASSERT(size == 1); copyBlkClass = objClass; #ifdef UNIX_AMD64_ABI assert(!"Structs are not passed by reference on x64/ux"); #endif // UNIX_AMD64_ABI } else // This is passed by value. { // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE); structSize = originalSize; unsigned passingSize = originalSize; // Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size. // When it can do this is platform-dependent: // - In general, it can be done for power of 2 structs that fit in a single register. // - For ARM and ARM64 it must also be a non-HFA struct, or have a single field. // - This is irrelevant for X86, since structs are always passed by value on the stack. 
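// For example, a struct wrapping a single 'int' that is passed by value in a
// register can have its GT_OBJ re-typed to a simple TYP_INT indirection.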
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj); bool canTransform = false; if (structBaseType != TYP_STRUCT) { if (isPow2(passingSize)) { canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 // or UNIX_AMD64_ABI cases where they will be passed in registers. else { canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } #endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { #if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. assert(size == 1); copyBlkClass = objClass; #else // UNIX_AMD64_ABI // On Unix, structs are always passed by value. // We only need a copy if we have one of the following: // - The sizes don't match for a non-lclVar argument. // - We have a known struct type (e.g. SIMD) that requires multiple registers. // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not // actually passed in registers. if (argEntry->isPassedInRegisters()) { if (argObj->OperIs(GT_OBJ)) { if (passingSize != structSize) { copyBlkClass = objClass; } } else if (lclVar == nullptr) { // This should only be the case of a value directly producing a known struct type. assert(argObj->TypeGet() != TYP_STRUCT); if (argEntry->numRegs > 1) { copyBlkClass = objClass; } } } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif #ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || ((argObj->OperIs(GT_OBJ)) && (passingSize != structSize))) { copyBlkClass = objClass; } if (structSize < TARGET_POINTER_SIZE) { copyBlkClass = objClass; } #endif // TARGET_ARM } else { // We have a struct argument that fits into a register, and it is either a power of 2, // or a local. // Change our argument, as needed, into a value of the appropriate type. 
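// Schematically, the re-typing below performs:
//   OBJ(ADDR(LCL)) => IND(ADDR(LCL)) => LCL   (when the local's type matches)
//   LCL_VAR        => LCL_FLD                 (when the type must be swizzled)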
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) && size == (genTypeSize(structBaseType) / REGSIZE_BYTES))); #endif assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize)); if (argObj->OperIs(GT_OBJ)) { argObj->ChangeOper(GT_IND); // Now see if we can fold *(&X) into X if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR) { GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1; // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE); DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(argObj); // GT_IND argObj = temp; *parentArgx = temp; argx = temp; } } if (argObj->gtOper == GT_LCL_VAR) { unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { if (varDsc->lvFieldCnt == 1) { // get the first and only promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize) { // we will use the first and only promoted field argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart); if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize)) { // Just use the existing field's type argObj->gtType = fieldVarDsc->TypeGet(); } else { // Can't use the existing field's type, so use GT_LCL_FLD to swizzle // to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet())); assert(copyBlkClass == NO_CLASS_HANDLE); } else { // use GT_LCL_FLD to swizzle the single field struct to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // The struct fits into a single register, but it has been promoted into its // constituent fields, and so we have to re-assemble it copyBlkClass = objClass; } } else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType)) { // Not a promoted struct, so just swizzle the type by using GT_LCL_FLD lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // Not a GT_LCL_VAR, so we can just change the type on the node argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } #if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in // `genPutStructArgStk` for xarch like we did it for Arm/Arm64. // We still have a struct unless we converted the GT_OBJ into a GT_IND above... if (isHfaArg && passUsingFloatRegs) { } else if (structBaseType == TYP_STRUCT) { // If the valuetype size is not a multiple of TARGET_POINTER_SIZE, // we must copyblk to a temp before doing the obj to avoid // the obj reading memory past the end of the valuetype CLANG_FORMAT_COMMENT_ANCHOR; if (roundupSize > originalSize) { copyBlkClass = objClass; // There are a few special cases where we can omit using a CopyBlk // where we normally would need to use one. 
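// (Rationale, as we understand it: if the source is a local, reading the
// rounded-up size cannot read past its frame allocation, so the temp copy
// that the padding would otherwise require can be omitted.)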
if (argObj->OperIs(GT_OBJ) && argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar? { copyBlkClass = NO_CLASS_HANDLE; } } } #endif // !UNIX_AMD64_ABI } } if (argEntry->isPassedInRegisters()) { call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); } else { call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing); } if (copyBlkClass != NO_CLASS_HANDLE) { fgMakeOutgoingStructArgCopy(call, args, copyBlkClass); } if (argx->gtOper == GT_MKREFANY) { // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); #ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); #else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument")); lvaSetStruct(tmp, impGetRefAnyClass(), false); // Build the mkrefany as a comma node: // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); destPtrSlot->gtFlags |= GTF_VAR_DEF; destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2); GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot); // Change the expression to "(tmp=val)" args->SetNode(asg); // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS if (isStructArg) { if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) || (isHfaArg && argx->TypeGet() == TYP_STRUCT)) { hasMultiregStructArgs = true; } } #ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); } #endif else { // We must have exactly one register or slot. assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) || ((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1))); } #endif #if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx); if ((lclNode != nullptr) && (lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // Make a GT_FIELD_LIST of the field lclVars. 
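// For example (hypothetical locals): struct { int a; int b; } promoted into
// field locals V05 and V06 becomes FIELD_LIST(V05 int @0, V06 int @4).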
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon(); LclVarDsc* varDsc = lvaGetDesc(lcl); GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); for (unsigned fieldLclNum = varDsc->lvFieldLclStart; fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* fieldLcl; if (fieldLclNum == varDsc->lvFieldLclStart) { lcl->SetLclNum(fieldLclNum); lcl->SetOperResetFlags(GT_LCL_VAR); lcl->gtType = fieldVarDsc->TypeGet(); fieldLcl = lcl; } else { fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); } fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); } } } #endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; } // end foreach argument loop if (!reMorphing) { call->fgArgInfo->ArgsComplete(); } /* Process the function address, if indirect call */ if (call->gtCallType == CT_INDIRECT) { call->gtCallAddr = fgMorphTree(call->gtCallAddr); // Const CSE may create an assignment node here flagsSummary |= call->gtCallAddr->gtFlags; } #if FEATURE_FIXED_OUT_ARGS // Record the outgoing argument size. If the call is a fast tail // call, it will setup its arguments in incoming arg area instead // of the out-going arg area, so we don't need to track the // outgoing arg size. if (!call->IsFastTailCall()) { #if defined(UNIX_AMD64_ABI) // This is currently required for the UNIX ABI to work correctly. opts.compNeedToAlignFrame = true; #endif // UNIX_AMD64_ABI const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) unsigned preallocatedArgCount = 0; if (!compMacOsArm64Abi()) { preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); #ifdef DEBUG if (verbose) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " "outgoingArgSpaceSize=%d\n", argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } else { printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); #endif } #endif } #endif // FEATURE_FIXED_OUT_ARGS // Clear the ASG and EXCEPT (if possible) flags on the call node call->gtFlags &= ~GTF_ASG; if (!call->OperMayThrow(this)) { call->gtFlags &= ~GTF_EXCEPT; } // Union in the side effect flags from the call's operands call->gtFlags |= flagsSummary & GTF_ALL_EFFECT; // If we are remorphing or don't have any register arguments or other arguments that need // temps, then we don't need to call SortArgs() and EvalArgsToTemps(). // if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps())) { // Do the 'defer or eval to temp' analysis. 
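// Roughly: SortArgs orders the arg table entries, and EvalArgsToTemps then
// assigns each arg that needs a temp in the early list and places the use of
// that temp (or the arg itself) into gtCallLateArgs.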
call->fgArgInfo->SortArgs(); call->fgArgInfo->EvalArgsToTemps(); } if (hasMultiregStructArgs) { fgMorphMultiregStructArgs(call); } #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif return call; } #ifdef _PREFAST_ #pragma warning(pop) #endif //----------------------------------------------------------------------------- // fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and // call fgMorphMultiregStructArg on each of them. // // Arguments: // call : a GenTreeCall node that has one or more TYP_STRUCT arguments. // // Notes: // We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types. // It will ensure that the struct arguments are in the correct form. // If this method fails to find any TYP_STRUCT arguments it will assert. // void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) { bool foundStructArg = false; GenTreeFlags flagsSummary = GTF_EMPTY; #ifdef TARGET_X86 assert(!"Logic error: no MultiregStructArgs for X86"); #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI"); #endif for (GenTreeCall::Use& use : call->Args()) { // For late arguments the arg tree that is overridden is in the gtCallLateArgs list. // For such late args the gtCallArgList contains the setup arg node (evaluating the arg). // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself, // otherwise it points to the node in the late args list. bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0; fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode()); assert(fgEntryPtr != nullptr); GenTree* argx = fgEntryPtr->GetNode(); GenTreeCall::Use* lateUse = nullptr; GenTree* lateNode = nullptr; if (isLateArg) { for (GenTreeCall::Use& lateArgUse : call->LateArgs()) { GenTree* argNode = lateArgUse.GetNode(); if (argx == argNode) { lateUse = &lateArgUse; lateNode = argNode; break; } } assert((lateUse != nullptr) && (lateNode != nullptr)); } if (!fgEntryPtr->isStruct) { continue; } unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber()); if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT)) { foundStructArg = true; if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST)) { if (fgEntryPtr->IsHfaRegArg()) { var_types hfaType = fgEntryPtr->GetHfaType(); unsigned structSize; if (argx->OperIs(GT_OBJ)) { structSize = argx->AsObj()->GetLayout()->GetSize(); } else if (varTypeIsSIMD(argx)) { structSize = genTypeSize(argx); } else { assert(argx->OperIs(GT_LCL_VAR)); structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize; } assert(structSize > 0); if (structSize == genTypeSize(hfaType)) { if (argx->OperIs(GT_OBJ)) { argx->SetOper(GT_IND); } argx->gtType = hfaType; } } GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr); // Did we replace 'argx' with a new tree?
if (newArgx != argx) { // link the new arg node into either the late arg list or the gtCallArgs list if (isLateArg) { lateUse->SetNode(newArgx); } else { use.SetNode(newArgx); } assert(fgEntryPtr->GetNode() == newArgx); } } } } // We should only call this method when we actually have one or more multireg struct args assert(foundStructArg); // Update the flags call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT); } //----------------------------------------------------------------------------- // fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list, // morph the argument as needed to be passed correctly. // // Arguments: // arg - A GenTree node containing a TYP_STRUCT arg // fgEntryPtr - the fgArgTabEntry information for the current 'arg' // // Notes: // The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT. // If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the // stack are marked as doNotEnregister, and then we return. // // If it is passed by register, we mutate the argument into the GT_FIELD_LIST form // which is only used for struct arguments. // // If arg is a LclVar we check if it is struct promoted and has the right number of fields, // and if the fields are at the appropriate offsets we will use the struct promoted fields // in the GT_FIELD_LIST nodes that we create. // If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements // we will use a set of GT_LCL_FLD nodes to access the various portions of the struct; // this also forces the struct to be stack allocated into the local frame. // For the GT_OBJ case we will clone the address expression and generate two (or more) // indirections. // Currently the implementation handles ARM64/ARM and UNIX_AMD64_ABI, and will NYI for other architectures. // GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr) { assert(varTypeIsStruct(arg->TypeGet())); #if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI) NYI("fgMorphMultiregStructArg requires implementation for this target"); #endif #ifdef TARGET_ARM if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) || (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK)) #else if (fgEntryPtr->GetRegNum() == REG_STK) #endif { GenTreeLclVarCommon* lcl = nullptr; GenTree* actualArg = arg->gtEffectiveVal(); if (actualArg->OperGet() == GT_OBJ) { if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR)) { lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon(); } } else if (actualArg->OperGet() == GT_LCL_VAR) { lcl = actualArg->AsLclVarCommon(); } if (lcl != nullptr) { if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT) { arg = fgMorphLclArgToFieldlist(lcl); } else if (arg->TypeGet() == TYP_STRUCT) { // If this is a non-register struct, it must be referenced from memory. if (!actualArg->OperIs(GT_OBJ)) { // Create an Obj of the temp to use it as a call argument. arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg); arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg); } // Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg)); } } return arg; } #if FEATURE_MULTIREG_ARGS // Examine 'arg' and setup argValue objClass and structSize // const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg); GenTree* argValue = arg; // normally argValue will be arg, but see right below unsigned structSize = 0; if (arg->TypeGet() != TYP_STRUCT) { structSize = genTypeSize(arg->TypeGet()); assert(structSize == info.compCompHnd->getClassSize(objClass)); } else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); const ClassLayout* objLayout = argObj->GetLayout(); structSize = objLayout->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. GenTree* op1 = argObj->gtOp1; if (op1->OperGet() == GT_ADDR) { GenTree* underlyingTree = op1->AsOp()->gtOp1; // Only update to the same type. if (underlyingTree->OperIs(GT_LCL_VAR)) { const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar()); if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout)) { argValue = underlyingTree; } } } } else if (arg->OperGet() == GT_LCL_VAR) { LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon()); structSize = varDsc->lvExactSize; assert(structSize == info.compCompHnd->getClassSize(objClass)); } else { structSize = info.compCompHnd->getClassSize(objClass); } var_types hfaType = TYP_UNDEF; var_types elemType = TYP_UNDEF; unsigned elemCount = 0; unsigned elemSize = 0; var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0 hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters()) { elemType = hfaType; elemSize = genTypeSize(elemType); elemCount = structSize / elemSize; assert(elemSize * elemCount == structSize); for (unsigned inx = 0; inx < elemCount; inx++) { type[inx] = elemType; } } else { assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE); BYTE gcPtrs[MAX_ARG_REG_COUNT]; elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); for (unsigned inx = 0; inx < elemCount; inx++) { #ifdef UNIX_AMD64_ABI if (gcPtrs[inx] == TYPE_GC_NONE) { type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx], fgEntryPtr->structDesc.eightByteSizes[inx]); } else #endif // UNIX_AMD64_ABI { type[inx] = getJitGCType(gcPtrs[inx]); } } #ifndef UNIX_AMD64_ABI if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { elemSize = TARGET_POINTER_SIZE; // We can safely widen this to aligned bytes since we are loading from // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and // lives in the stack frame or will be a promoted field. 
// structSize = elemCount * TARGET_POINTER_SIZE; } else // we must have a GT_OBJ { assert(argValue->OperGet() == GT_OBJ); // We need to load the struct from an arbitrary address // and we can't read past the end of the structSize // We adjust the last load type here // unsigned remainingBytes = structSize % TARGET_POINTER_SIZE; unsigned lastElem = elemCount - 1; if (remainingBytes != 0) { switch (remainingBytes) { case 1: type[lastElem] = TYP_BYTE; break; case 2: type[lastElem] = TYP_SHORT; break; #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) case 4: type[lastElem] = TYP_INT; break; #endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) default: noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg"); break; } } } #endif // !UNIX_AMD64_ABI } // We should still have a TYP_STRUCT assert(varTypeIsStruct(argValue->TypeGet())); GenTreeFieldList* newArg = nullptr; // Are we passing a struct LclVar? // if (argValue->OperGet() == GT_LCL_VAR) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); // At this point any TYP_STRUCT LclVar must be an aligned struct // or an HFA struct, both of which are passed by value. // assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa()); varDsc->lvIsMultiRegArg = true; #ifdef DEBUG if (verbose) { JITDUMP("Multireg struct argument V%02u : ", varNum); fgEntryPtr->Dump(); } #endif // DEBUG #ifndef UNIX_AMD64_ABI // This local variable must match the layout of the 'objClass' type exactly if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { // We have a HFA struct. noway_assert(elemType == varDsc->GetHfaType()); noway_assert(elemSize == genTypeSize(elemType)); noway_assert(elemCount == (varDsc->lvExactSize / elemSize)); noway_assert(elemSize * elemCount == varDsc->lvExactSize); for (unsigned inx = 0; (inx < elemCount); inx++) { noway_assert(type[inx] == elemType); } } else { #if defined(TARGET_ARM64) // We must have a 16-byte struct (non-HFA) noway_assert(elemCount == 2); #elif defined(TARGET_ARM) noway_assert(elemCount <= 4); #endif for (unsigned inx = 0; inx < elemCount; inx++) { var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx); // We setup the type[inx] value above using the GC info from 'objClass' // This GT_LCL_VAR must have the same GC layout info // if (varTypeIsGC(currentGcLayoutType)) { noway_assert(type[inx] == currentGcLayoutType); } else { // We may have used a small type when we setup the type[inx] values above // We can safely widen this to TYP_I_IMPL type[inx] = TYP_I_IMPL; } } } if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { bool canMorphToFieldList = true; for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize) { const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset); if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum))) { canMorphToFieldList = false; break; } } if (canMorphToFieldList) { newArg = fgMorphLclArgToFieldlist(varNode); } } else #endif // !UNIX_AMD64_ABI #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // Is this LclVar a promoted struct with exactly 2 fields? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa()) { // See if we have two promoted fields that start at offsets 0 and 8? unsigned loVarNum = lvaGetFieldLocal(varDsc, 0); unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE); // Did we find the promoted fields at the necessary offsets?
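// e.g. (hypothetical) struct { object o; long l; } promoted into fields at
// offsets 0 and TARGET_POINTER_SIZE satisfies this lookup, while a struct
// with three promoted fields was already excluded by lvFieldCnt == 2 above.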
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM)) { LclVarDsc* loVarDsc = lvaGetDesc(loVarNum); LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum); var_types loType = loVarDsc->lvType; var_types hiType = hiVarDsc->lvType; if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) || (varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1)))) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // } else { // We can use the struct promoted field as the two arguments // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr)) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType); newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType); } } } else { // // We will create a list of GT_LCL_FLD nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #elif defined(TARGET_ARM) // Is this LclVar a promoted struct whose field count matches the register slot count? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { // See if we have promoted fields at each slot-aligned offset? unsigned varNums[4]; bool hasBadVarNum = false; for (unsigned inx = 0; inx < elemCount; inx++) { varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx); if (varNums[inx] == BAD_VAR_NUM) { hasBadVarNum = true; break; } } // Did we find the promoted fields at the necessary offsets? if (!hasBadVarNum) { LclVarDsc* varDscs[4]; var_types varType[4]; bool varIsFloat = false; for (unsigned inx = 0; inx < elemCount; inx++) { varDscs[inx] = lvaGetDesc(varNums[inx]); varType[inx] = varDscs[inx]->lvType; if (varTypeIsFloating(varType[inx])) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // varIsFloat = true; break; } } if (!varIsFloat) { newArg = fgMorphLclArgToFieldlist(varNode); } } } else { // // We will create a list of GT_LCL_FLD nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #endif // TARGET_ARM } // If we didn't set newArg to a new FIELD_LIST node tree // if (newArg == nullptr) { if (fgEntryPtr->GetRegNum() == REG_STK) { // We leave this stack passed argument alone return arg; } // Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted ) // A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it?
// if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); unsigned baseOffset = varNode->GetLclOffs(); unsigned lastOffset = baseOffset + structSize; // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we setup using 'objClass' if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } else // this varDsc contains no GC pointers { for (unsigned inx = 0; inx < elemCount; inx++) { // The GC information must match what we setup using 'objClass' noway_assert(!varTypeIsGC(type[inx])); } } // // We create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI) // unsigned offset = baseOffset; newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset); newArg->AddField(this, nextLclFld, offset, type[inx]); offset += genTypeSize(type[inx]); } } // Are we passing a GT_OBJ struct? // else if (argValue->OperGet() == GT_OBJ) { GenTreeObj* argObj = argValue->AsObj(); GenTree* baseAddr = argObj->gtOp1; var_types addrType = baseAddr->TypeGet(); if (baseAddr->OperGet() == GT_ADDR) { GenTree* addrTaken = baseAddr->AsOp()->gtOp1; if (addrTaken->IsLocal()) { GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); // We access non-struct type (for example, long) as a struct type. // Make sure lclVar lives on stack to make sure its fields are accessible by address. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } } // Create a new tree for 'arg' // replace the existing LDOBJ(EXPR) // with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); unsigned offset = 0; for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* curAddr = baseAddr; if (offset != 0) { GenTree* baseAddrDup = gtCloneExpr(baseAddr); noway_assert(baseAddrDup != nullptr); curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL)); } else { curAddr = baseAddr; } GenTree* curItem = gtNewIndir(type[inx], curAddr); // For safety all GT_IND should have at least GT_GLOB_REF set. 
curItem->gtFlags |= GTF_GLOB_REF; newArg->AddField(this, curItem, offset, type[inx]); offset += genTypeSize(type[inx]); } } } #ifdef DEBUG // If we reach here we should have set newArg to something if (newArg == nullptr) { gtDispTree(argValue); assert(!"Missing case in fgMorphMultiregStructArg"); } #endif noway_assert(newArg != nullptr); #ifdef DEBUG if (verbose) { printf("fgMorphMultiregStructArg created tree:\n"); gtDispTree(newArg); } #endif arg = newArg; // consider calling fgMorphTree(newArg); #endif // FEATURE_MULTIREG_ARGS return arg; } //------------------------------------------------------------------------ // fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields // // Arguments: // lcl - The GT_LCL_VAR node we will transform // // Return value: // The new GT_FIELD_LIST that we have created. // GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl) { LclVarDsc* varDsc = lvaGetDesc(lcl); assert(varDsc->lvPromoted); unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclNum = varDsc->lvFieldLclStart; GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned i = 0; i < fieldCount; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); fieldLclNum++; } return fieldList; } //------------------------------------------------------------------------ // fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary, // to pass to a callee. // // Arguments: // call - call being processed // args - args for the call // copyBlkClass - class handle for the struct // // The arg is updated if necessary with the copy. // void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass) { GenTree* argx = args->GetNode(); noway_assert(argx->gtOper != GT_MKREFANY); fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx); // If we're optimizing, see if we can avoid making a copy. // // We don't need a copy if this is the last use of an implicit by-ref local. // if (opts.OptimizationEnabled()) { GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { const unsigned varNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(varNum); const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY); // We don't have liveness so we rely on other indications of last use. // // We handle these cases: // // * (must not copy) If the call is a tail call, the use is a last use. // We must skip the copy if we have a fast tail call. // // * (may not copy) if the call is noreturn, the use is a last use. // We also check for just one reference here as we are not doing // alias analysis of the call's parameters, or checking if the call // site is not within some try region. // // * (may not copy) if there is exactly one use of the local in the method, // and the call is not in loop, this is a last use. // // fgMightHaveLoop() is expensive; check it last, only if necessary. 
// if (call->IsTailCall() || // ((totalAppearances == 1) && call->IsNoReturn()) || // ((totalAppearances == 1) && !fgMightHaveLoop())) { args->SetNode(lcl); assert(argEntry->GetNode() == lcl); JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum); return; } } } JITDUMP("making an outgoing copy for struct arg\n"); if (fgOutgoingArgTemps == nullptr) { fgOutgoingArgTemps = hashBv::Create(this); } unsigned tmp = 0; bool found = false; // Attempt to find a local we have already used for an outgoing struct and reuse it. // We do not reuse within a statement. if (!opts.MinOpts()) { indexType lclNum; FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) && !fgCurrentlyInUseArgTemps->testBit(lclNum)) { tmp = (unsigned)lclNum; found = true; JITDUMP("reusing outgoing struct arg"); break; } } NEXT_HBV_BIT_SET; } // Create the CopyBlk tree and insert it. if (!found) { // Get a new temp // Here we don't need unsafe value cls check, since the addr of this temp is used only in copyblk. tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument")); lvaSetStruct(tmp, copyBlkClass, false); if (call->IsVarargs()) { lvaSetStructUsedAsVarArg(tmp); } fgOutgoingArgTemps->setBit(tmp); } fgCurrentlyInUseArgTemps->setBit(tmp); // TYP_SIMD structs should not be enregistered, since ABI requires it to be // allocated on stack and address of it needs to be passed. if (lclVarIsSIMDType(tmp)) { // TODO: check if we need this block here or other parts already deal with it. lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg)); } // Create a reference to the temp GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType); dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction. // Copy the valuetype to the temp GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */); copyBlk = fgMorphCopyBlock(copyBlk); #if FEATURE_FIXED_OUT_ARGS // Do the copy early, and evaluate the temp later (see EvalArgsToTemps) // When on Unix create LCL_FLD for structs passed in more than one register. See fgMakeTmpArgNode GenTree* arg = copyBlk; #else // FEATURE_FIXED_OUT_ARGS // Structs are always on the stack, and thus never need temps // so we have to put the copy and temp all into one expression. argEntry->tmpNum = tmp; GenTree* arg = fgMakeTmpArgNode(argEntry); // Change the expression to "(tmp=val),tmp" arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg); #endif // FEATURE_FIXED_OUT_ARGS args->SetNode(arg); call->fgArgInfo->EvalToTmp(argEntry, tmp, arg); } #ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask) { assert(varDsc->lvPromoted); // There's no way to do these calculations without breaking abstraction and assuming that // integer register arguments are consecutive ints. They are on ARM. // To start, figure out what register contains the last byte of the first argument. LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; // Now we're keeping track of the register that the last field ended in; see what registers // subsequent fields start in, and whether any are skipped.
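// For example (illustration): for struct { float f; double d; } passed in
// integer registers, 'f' ends in r0 but alignment places 'd' at offset 8,
// i.e. starting at r2, so r1 is skipped and added to *pArgSkippedRegMask.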
// (We assume here the invariant that the fields are sorted in offset order.) for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++) { unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset; LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum); unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE; assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields. // This loop should enumerate the offsets of any registers skipped. // Find what reg contains the last byte, // and start at the first register after that. If that isn't the first reg of the current // field, the registers in between were skipped. for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++) { // If the register number would not be an arg reg, we're done. if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return; *pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets)); } lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; } } #endif // TARGET_ARM /***************************************************************************** * * A little helper used to rearrange nested commutative operations. The * effect is that nested associative, commutative operations are transformed * into a 'left-deep' tree, i.e. into something like this: * * (((a op b) op c) op d) op... */ #if REARRANGE_ADDS void Compiler::fgMoveOpsLeft(GenTree* tree) { GenTree* op1; GenTree* op2; genTreeOps oper; do { op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; oper = tree->OperGet(); noway_assert(GenTree::OperIsCommutative(oper)); noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL); noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder); noway_assert(oper == op2->gtOper); // Commutativity doesn't hold if overflow checks are needed if (tree->gtOverflowEx() || op2->gtOverflowEx()) { return; } if (gtIsActiveCSE_Candidate(op2)) { // If we have marked op2 as a CSE candidate, // we can't perform a commutative reordering // because any value numbers that we computed for op2 // will be incorrect after performing a commutative reordering // return; } if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT)) { return; } // Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)) { return; } if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN) { // We could deal with this, but we were always broken and just hit the assert // below regarding flags, which means it's not frequent, so will just bail out. // See #195514 return; } noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx()); GenTree* ad1 = op2->AsOp()->gtOp1; GenTree* ad2 = op2->AsOp()->gtOp2; // Compiler::optOptimizeBools() can create GT_OR of two GC pointers yielding a GT_INT // We cannot reorder such GT_OR trees // if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet())) { break; } // Don't split up a byref calculation and create a new byref. E.g., // [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int). // Doing this transformation could create a situation where the first // addition (that is, [byref]+ (ref, int) ) creates a byref pointer that // no longer points within the ref object. If a GC happens, the byref won't // get updated. This can happen, for instance, if one of the int components // is negative.
It also requires the address generation be in a fully-interruptible // code region. // if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL) { assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD)); break; } /* Change "(x op (y op z))" to "(x op y) op z" */ /* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */ GenTree* new_op1 = op2; new_op1->AsOp()->gtOp1 = op1; new_op1->AsOp()->gtOp2 = ad1; /* Change the flags. */ // Make sure we aren't throwing away any flags noway_assert((new_op1->gtFlags & ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag. GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0); new_op1->gtFlags = (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag. (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT); /* Retype new_op1 if it has become (or is no longer) a GC ptr. */ if (varTypeIsGC(op1->TypeGet())) { noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_ADD) || // byref(ref + (int+int)) (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_OR)); // int(gcref | int(gcref|intval)) new_op1->gtType = tree->gtType; } else if (varTypeIsGC(ad2->TypeGet())) { // Neither ad1 nor op1 is GC, so new_op1 isn't either noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL); new_op1->gtType = TYP_I_IMPL; } // If new_op1 is a new expression, assign it a new unique value number. // vnStore is null before the ValueNumber phase has run if (vnStore != nullptr) { // We can only keep the old value number on new_op1 if both op1 and ad2 // have the same non-NoVN value numbers. Since op is commutative, comparing // only ad2 and op1 is enough. if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal())) { new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet())); } } tree->AsOp()->gtOp1 = new_op1; tree->AsOp()->gtOp2 = ad2; /* If 'new_op1' is now the same nested op, process it recursively */ if ((ad1->gtOper == oper) && !ad1->gtOverflowEx()) { fgMoveOpsLeft(new_op1); } /* If 'ad2' is now the same nested op, process it * Instead of recursion, we set up op1 and op2 for the next loop.
*/ op1 = new_op1; op2 = ad2; } while ((op2->gtOper == oper) && !op2->gtOverflowEx()); return; } #endif /*****************************************************************************/ void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay) { if (tree->OperIs(GT_BOUNDS_CHECK)) { GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk(); BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay); if (failBlock != nullptr) { boundsChk->gtIndRngFailBB = failBlock; } } else if (tree->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr(); BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); if (failBlock != nullptr) { indexAddr->gtIndRngFailBB = failBlock; } } else { noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX)); fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); } } BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay) { if (opts.MinOpts()) { delay = false; } if (!opts.compDbgCode) { if (!delay && !compIsForInlining()) { // Create/find the appropriate "range-fail" label return fgRngChkTarget(compCurBB, kind); } } return nullptr; } /***************************************************************************** * * Expand a GT_INDEX node and fully morph the child operands * * The original GT_INDEX node is bashed into the GT_IND node that accesses * the array element. We expand the GT_INDEX node into a larger tree that * evaluates the array base and index. The simplest expansion is a GT_COMMA * with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag. * For complex array or index expressions one or more GT_COMMA assignments * are inserted so that we only evaluate the array or index expressions once. * * The fully expanded tree is then morphed. This causes gtFoldExpr to * perform local constant prop and reorder the constants in the tree and * fold them. * * We then parse the resulting array element expression in order to locate * and label the constants and variables that occur in the tree. */ const int MAX_ARR_COMPLEXITY = 4; const int MAX_INDEX_COMPLEXITY = 4; GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) { noway_assert(tree->gtOper == GT_INDEX); GenTreeIndex* asIndex = tree->AsIndex(); var_types elemTyp = asIndex->TypeGet(); unsigned elemSize = asIndex->gtIndElemSize; CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass; noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr); // Fold "cns_str"[cns_index] to ushort constant // NOTE: don't do it for empty string, the operation will fail anyway if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) && !asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32()) { const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue()); if (cnsIndex >= 0) { int length; const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd, asIndex->Arr()->AsStrCon()->gtSconCPX, &length); if ((cnsIndex < length) && (str != nullptr)) { GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT); INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return cnsCharNode; } } } #ifdef FEATURE_SIMD if (featureSIMD && varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize)) { // If this is a SIMD type, this is the point at which we lose the type information, // so we need to set the correct type on the GT_IND. // (We don't care about the base type here, so we only check, but don't retain, the return value).
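// e.g. an array whose element type is a 16-byte SIMD struct (such as Vector4)
// will have the element access re-typed from TYP_STRUCT to TYP_SIMD16 below.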
        unsigned simdElemSize = 0;
        if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF)
        {
            assert(simdElemSize == elemSize);
            elemTyp = getSIMDTypeForSize(elemSize);
            // This is the new type of the node.
            tree->gtType = elemTyp;
            // Now set elemStructType to null so that we don't confuse value numbering.
            elemStructType = nullptr;
        }
    }
#endif // FEATURE_SIMD

    // Set up the array length's offset into lenOffs
    // And the first element's offset into elemOffs
    ssize_t lenOffs;
    ssize_t elemOffs;
    if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
    {
        lenOffs  = OFFSETOF__CORINFO_String__stringLen;
        elemOffs = OFFSETOF__CORINFO_String__chars;
        tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
    }
    else
    {
        // We have a standard array
        lenOffs  = OFFSETOF__CORINFO_Array__length;
        elemOffs = OFFSETOF__CORINFO_Array__data;
    }

    // In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts
    // compilation time is roughly proportional to the size of the IR, this helps keep compilation times down.
    // Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion
    // performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in
    // minopts).
    //
    // When we *are* optimizing, we fully expand GT_INDEX to:
    // 1. Evaluate the array address expression and store the result in a temp if the expression is complex or
    //    side-effecting.
    // 2. Evaluate the array index expression and store the result in a temp if the expression is complex or
    //    side-effecting.
    // 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array))
    // 4. Compute the address of the element that will be accessed:
    //    GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize))
    // 5. Dereference the address with a GT_IND.
    //
    // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows
    // for more straightforward bounds-check removal, CSE, etc.
    if (opts.MinOpts())
    {
        GenTree* const array = fgMorphTree(asIndex->Arr());
        GenTree* const index = fgMorphTree(asIndex->Index());

        GenTreeIndexAddr* const indexAddr =
            new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize,
                                                       static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs));
        indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT;

        // Mark the indirection node as needing a range check if necessary.
        // Note this will always be true unless JitSkipArrayBoundCheck() is used
        if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0)
        {
            fgSetRngChkTarget(indexAddr);
        }

        if (!tree->TypeIs(TYP_STRUCT))
        {
            tree->ChangeOper(GT_IND);
        }
        else
        {
            DEBUG_DESTROY_NODE(tree);
            tree = gtNewObjNode(elemStructType, indexAddr);
            INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
        }

        GenTreeIndir* const indir = tree->AsIndir();
        indir->Addr()             = indexAddr;
        bool canCSE               = indir->CanCSE();
        indir->gtFlags            = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT);
        if (!canCSE)
        {
            indir->SetDoNotCSE();
        }

        INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);

        return indir;
    }

    GenTree* arrRef = asIndex->Arr();
    GenTree* index  = asIndex->Index();

    bool chkd             = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
    bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING
    bool nCSE             = ((tree->gtFlags & GTF_DONT_CSE) != 0);

    GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
    GenTree* indexDefn  = nullptr; // non-NULL if we need to allocate a temp for the index expression
    GenTree* bndsChk    = nullptr;

    // If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address.
    if (chkd)
    {
        GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression
        GenTree* index2  = nullptr;

        // If the arrRef or index expressions involves an assignment, a call or reads from global memory,
        // then we *must* allocate a temporary in which to "localize" those values, to ensure that the
        // same values are used in the bounds check and the actual dereference.
        // Also we allocate the temporary when the expression is sufficiently complex/expensive.
        //
        // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is
        // not exposed. Without that condition there are cases of local struct fields that were previously,
        // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that
        // were mostly ameliorated by adding this condition.
        //
        // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created
        // after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is
        // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to
        // do this here.

        if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) ||
            gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD))
        {
            unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
            arrRefDefn            = gtNewTempAssign(arrRefTmpNum, arrRef);
            arrRef                = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
            arrRef2               = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
        }
        else
        {
            arrRef2 = gtCloneExpr(arrRef);
            noway_assert(arrRef2 != nullptr);
        }

        if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) ||
            gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) || index->OperIs(GT_FIELD, GT_LCL_FLD))
        {
            unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr"));
            indexDefn            = gtNewTempAssign(indexTmpNum, index);
            index                = gtNewLclvNode(indexTmpNum, index->TypeGet());
            index2               = gtNewLclvNode(indexTmpNum, index->TypeGet());
        }
        else
        {
            index2 = gtCloneExpr(index);
            noway_assert(index2 != nullptr);
        }

        // Next introduce a GT_BOUNDS_CHECK node
        var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
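        // (Illustrative) For "a[i]" the node built below has the shape
        //     GT_BOUNDS_CHECK(i, GT_ARR_LENGTH(a))
        // with throw kind SCK_RNGCHK_FAIL, i.e. the IndexOutOfRangeException path when i is out of range.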
#ifdef TARGET_64BIT
        // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
        // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
        // the comparison will have to be widened to 64 bits.
        if (index->TypeGet() == TYP_I_IMPL)
        {
            bndsChkType = TYP_I_IMPL;
        }
#endif // TARGET_64BIT

        GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);

        if (bndsChkType != TYP_INT)
        {
            arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
        }

        GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);

        bndsChk = arrBndsChk;

        // Now we'll switch to using the second copies for arrRef and index
        // to compute the address expression
        arrRef = arrRef2;
        index  = index2;
    }

    // Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
    GenTree* addr;

#ifdef TARGET_64BIT
    // Widen 'index' on 64-bit targets
    if (index->TypeGet() != TYP_I_IMPL)
    {
        if (index->OperGet() == GT_CNS_INT)
        {
            index->gtType = TYP_I_IMPL;
        }
        else
        {
            index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
        }
    }
#endif // TARGET_64BIT

    /* Scale the index value if necessary */
    if (elemSize > 1)
    {
        GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);

        // Fix 392756 WP7 Crossgen
        //
        // During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
        // is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
        // Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
        //
        size->gtFlags |= GTF_DONT_CSE;

        /* Multiply by the array element size */
        addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
    }
    else
    {
        addr = index;
    }

    // Be careful to only create the byref pointer when the full index expression is added to the array reference.
    // We don't want to create a partial byref address expression that doesn't include the full index offset:
    // a byref must point within the containing object. It is dangerous (especially when optimizations come into
    // play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
    // the partial byref will not point within the object, and thus not get updated correctly during a GC.
    // This is mostly a risk in fully-interruptible code regions.
    //
    // We can generate two types of trees for "addr":
    //
    //  1) "arrRef + (index + elemOffset)"
    //  2) "(arrRef + elemOffset) + index"
    //
    // XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
    // while for Arm we better try to make an invariant sub-tree as large as possible, which is usually
    // "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
    // 2) should still be safe from GC's point of view since both ADD operations are byref and point to
    // within the object so GC will be able to correctly track and update them.

    bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
    groupArrayRefWithElemOffset = true;
    // TODO: in some cases even on ARM we better use 1) shape because if "index" is invariant and "arrRef" is not
    // we at least will be able to hoist/CSE "index + elemOffset" in some cases.
    // See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497

    // Use 2) form only for primitive types for now - it significantly reduced number of size regressions
    if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
    {
        groupArrayRefWithElemOffset = false;
    }
#endif

    // First element's offset
    GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL);
    if (groupArrayRefWithElemOffset)
    {
        GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset);
        addr                    = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr);
    }
    else
    {
        addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset);
        addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
    }

    assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) ||
           (GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL));

    // Change the original GT_INDEX node into a GT_IND node
    tree->SetOper(GT_IND);

    // If the index node is a floating-point type, notify the compiler
    // we'll potentially use floating point registers at the time of codegen.
    if (varTypeUsesFloatReg(tree->gtType))
    {
        this->compFloatingPointUsed = true;
    }

    // We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node
    // is no longer a GT_INDEX node.
    tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT);

    tree->AsOp()->gtOp1 = addr;

    // This is an array index expression.
    tree->gtFlags |= GTF_IND_ARR_INDEX;

    // If there's a bounds check, the indir won't fault.
    if (bndsChk || indexNonFaulting)
    {
        tree->gtFlags |= GTF_IND_NONFAULTING;
    }
    else
    {
        tree->gtFlags |= GTF_EXCEPT;
    }

    if (nCSE)
    {
        tree->gtFlags |= GTF_DONT_CSE;
    }

    // Store information about it.
    GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType));

    // Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it.

    GenTree* indTree = tree;

    // Did we create a bndsChk tree?
    if (bndsChk)
    {
        // Use a GT_COMMA node to prepend the array bound check
        //
        tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree);

        /* Mark the indirection node as needing a range check */
        fgSetRngChkTarget(bndsChk);
    }

    if (indexDefn != nullptr)
    {
        // Use a GT_COMMA node to prepend the index assignment
        //
        tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree);
    }
    if (arrRefDefn != nullptr)
    {
        // Use a GT_COMMA node to prepend the arrRef assignment
        //
        tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
    }

    JITDUMP("fgMorphArrayIndex (before remorph):\n")
    DISPTREE(tree)

    // Currently we morph the tree to perform some folding operations prior
    // to attaching fieldSeq info and labeling constant array index contributions
    //
    tree = fgMorphTree(tree);

    JITDUMP("fgMorphArrayIndex (after remorph):\n")
    DISPTREE(tree)

    // Ideally we just want to proceed to attaching fieldSeq info and labeling the
    // constant array index contributions, but the morphing operation may have changed
    // the 'tree' into something that now unconditionally throws an exception.
    //
    // In such case the gtEffectiveVal could be a new tree or its gtOper could be modified
    // or it could be left unchanged.  If it is unchanged then we should not return,
    // instead we should proceed to attaching fieldSeq info, etc...
    //
    GenTree* arrElem = tree->gtEffectiveVal();

    if (fgIsCommaThrow(tree))
    {
        if ((arrElem != indTree) ||     // A new tree node may have been created
            (!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT
        {
            return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
        }
    }

    assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED));
    DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED)

    addr = arrElem->gtGetOp1();

    GenTree* cnsOff = nullptr;
    if (addr->OperIs(GT_ADD))
    {
        GenTree* addrOp1 = addr->gtGetOp1();
        if (groupArrayRefWithElemOffset)
        {
            if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI())
            {
                assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF));
                cnsOff = addrOp1->gtGetOp2();
                addr   = addr->gtGetOp2();

                // Label any constant array index contributions with #ConstantIndex and any LclVars with
                // GTF_VAR_ARR_INDEX
                addr->LabelIndex(this);
            }
            else
            {
                assert(addr->gtGetOp2()->IsCnsIntOrI());
                cnsOff = addr->gtGetOp2();
                addr   = nullptr;
            }
        }
        else
        {
            assert(addr->TypeIs(TYP_BYREF));
            assert(addr->gtGetOp1()->TypeIs(TYP_REF));
            addr = addr->gtGetOp2();

            // Look for the constant [#FirstElem] node here, or as the RHS of an ADD.
            if (addr->IsCnsIntOrI())
            {
                cnsOff = addr;
                addr   = nullptr;
            }
            else
            {
                if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI())
                {
                    cnsOff = addr->gtGetOp2();
                    addr   = addr->gtGetOp1();
                }

                // Label any constant array index contributions with #ConstantIndex and any LclVars with
                // GTF_VAR_ARR_INDEX
                addr->LabelIndex(this);
            }
        }
    }
    else if (addr->IsCnsIntOrI())
    {
        cnsOff = addr;
    }

    FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);

    if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs))
    {
        // Assign it the [#FirstElem] field sequence
        //
        cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq;
    }
    else //  We have folded the first element's offset with the index expression
    {
        // Build the [#ConstantIndex, #FirstElem] field sequence
        //
        FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
        FieldSeqNode* fieldSeq          = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq);

        if (cnsOff == nullptr) // It must have folded into a zero offset
        {
            // Record in the general zero-offset map.
            fgAddFieldSeqForZeroOffset(addr, fieldSeq);
        }
        else
        {
            cnsOff->AsIntCon()->gtFieldSeq = fieldSeq;
        }
    }

    return tree;
}

#ifdef TARGET_X86
/*****************************************************************************
 *
 *  Wrap fixed stack arguments for varargs functions to go through varargs
 *  cookie to access them, except for the cookie itself.
 *
 *  Non-x86 platforms are allowed to access all arguments directly
 *  so we don't need this code.
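 *
 *  For example (illustrative): a fixed stack argument is accessed below as an indirection off
 *  the varargs base pointer, roughly IND(lvaVarargsBaseOfStkArgs - adjustedOffset), where
 *  adjustedOffset accounts for the register-passed args and the local's offset.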
 *
 */
GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
{
    /* For the fixed stack arguments of a varargs function, we need to go
        through the varargs cookies to access them, except for the
        cookie itself */

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg)
    {
        // Create a node representing the local pointing to the base of the args
        GenTree* ptrArg =
            gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
                          gtNewIconNode(varDsc->GetStackOffset() -
                                        codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs));

        // Access the argument through the local
        GenTree* tree;
        if (varTypeIsStruct(varType))
        {
            CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
            assert(typeHnd != nullptr);
            tree = gtNewObjNode(typeHnd, ptrArg);
        }
        else
        {
            tree = gtNewOperNode(GT_IND, varType, ptrArg);
        }
        tree->gtFlags |= GTF_IND_TGTANYWHERE;

        if (varDsc->IsAddressExposed())
        {
            tree->gtFlags |= GTF_GLOB_REF;
        }

        return fgMorphTree(tree);
    }

    return NULL;
}
#endif

/*****************************************************************************
 *
 *  Transform the given GT_LCL_VAR tree for code generation.
 */

GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph)
{
    assert(tree->gtOper == GT_LCL_VAR);

    unsigned   lclNum  = tree->AsLclVarCommon()->GetLclNum();
    var_types  varType = lvaGetRealType(lclNum);
    LclVarDsc* varDsc  = lvaGetDesc(lclNum);

    if (varDsc->IsAddressExposed())
    {
        tree->gtFlags |= GTF_GLOB_REF;
    }

#ifdef TARGET_X86
    if (info.compIsVarArgs)
    {
        GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0);
        if (newTree != nullptr)
        {
            if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
            {
                newTree->SetOper(GT_IND);
            }
            return newTree;
        }
    }
#endif // TARGET_X86

    /* If not during the global morphing phase bail */
    if (!fgGlobalMorph && !forceRemorph)
    {
        return tree;
    }

    bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0;

    noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr

    if (!varAddr && varDsc->lvNormalizeOnLoad())
    {
        // TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL.
        // Now it does, but this leads to some regressions because we lose the uniform VNs for trees
        // that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created
        // here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)).
        // This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer.
        // This quirk preserves the previous behavior.
        // TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk.
        bool isBoolQuirk = varType == TYP_BOOL;

        // Assertion prop can tell us to omit adding a cast here. This is
        // useful when the local is a small-typed parameter that is passed in a
        // register: in that case, the ABI specifies that the upper bits might
        // be invalid, but the assertion guarantees us that we have normalized
        // when we wrote it.
        if (optLocalAssertionProp && !isBoolQuirk &&
            optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX)
        {
            // The previous assertion can guarantee us that if this node gets
            // assigned a register, it will be normalized already. It is still
            // possible that this node ends up being in memory, in which case
            // normalization will still be needed, so we better have the right
            // type.
            assert(tree->TypeGet() == varDsc->TypeGet());
            return tree;
        }

        // Small-typed arguments and aliased locals are normalized on load.
        // Other small-typed locals are normalized on store.
        // Also, under the debugger as the debugger could write to the variable.
        // If this is one of the former, insert a narrowing cast on the load.
        //         ie. Convert: var-short --> cast-short(var-int)

        tree->gtType = TYP_INT;
        fgMorphTreeDone(tree);
        tree = gtNewCastNode(TYP_INT, tree, false, varType);
        fgMorphTreeDone(tree);
        return tree;
    }

    return tree;
}

/*****************************************************************************
  Grab a temp for big offset morphing.
  This method will grab a new temp if no temp of this "type" has been created.
  Or it will return the same cached one if it has been created.
*/
unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
{
    unsigned lclNum = fgBigOffsetMorphingTemps[type];

    if (lclNum == BAD_VAR_NUM)
    {
        // We haven't created a temp for this kind of type. Create one now.
        lclNum                         = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
        fgBigOffsetMorphingTemps[type] = lclNum;
    }
    else
    {
        // We better get the right type.
        noway_assert(lvaTable[lclNum].TypeGet() == type);
    }

    noway_assert(lclNum != BAD_VAR_NUM);
    return lclNum;
}

/*****************************************************************************
 *
 *  Transform the given GT_FIELD tree for code generation.
 */

GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac)
{
    assert(tree->gtOper == GT_FIELD);

    CORINFO_FIELD_HANDLE symHnd          = tree->AsField()->gtFldHnd;
    unsigned             fldOffset       = tree->AsField()->gtFldOffset;
    GenTree*             objRef          = tree->AsField()->GetFldObj();
    bool                 fieldMayOverlap = false;
    bool                 objIsLocal      = false;

    if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR))
    {
        // Make sure we've checked if 'objRef' is an address of an implicit-byref parameter.
        // If it is, fgMorphImplicitByRefArgs may change it to a different opcode, which the
        // simd field rewrites are sensitive to.
        fgMorphImplicitByRefArgs(objRef);
    }

    noway_assert(((objRef != nullptr) && ((objRef->IsLocalAddrExpr() != nullptr) ||
                                          (objRef->IsImplicitByrefParameterValue(this) != nullptr))) ||
                 (tree->gtFlags & GTF_GLOB_REF) != 0);

    if (tree->AsField()->gtFldMayOverlap)
    {
        fieldMayOverlap = true;
        // Reset the flag because we may reuse the node.
        tree->AsField()->gtFldMayOverlap = false;
    }

#ifdef FEATURE_SIMD
    // if this field belongs to simd struct, translate it to simd intrinsic.
    if (mac == nullptr)
    {
        if (IsBaselineSimdIsaSupported())
        {
            GenTree* newTree = fgMorphFieldToSimdGetElement(tree);
            if (newTree != tree)
            {
                newTree = fgMorphTree(newTree);
                return newTree;
            }
        }
    }
    else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1()))
    {
        GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr();
        if (lcl != nullptr)
        {
            lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
        }
    }
#endif

    // Create a default MorphAddrContext early so it doesn't go out of scope
    // before it is used.
    MorphAddrContext defMAC(MACK_Ind);

    /* Is this an instance data member?
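       If so, objRef below is non-null and we morph the access into an indirection off
       the object reference; otherwise this is a static data member, handled in the
       else-block further down.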
     */
    if (objRef)
    {
        GenTree* addr;
        objIsLocal = objRef->IsLocal();

        if (tree->gtFlags & GTF_IND_TLS_REF)
        {
            NO_WAY("instance field cannot be a TLS ref.");
        }

        /* We'll create the expression "*(objRef + mem_offs)" */

        noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL);

        /*
            Now we have a tree like this:

                                  +--------------------+
                                  |      GT_FIELD      |   tree
                                  +----------+---------+
                                             |
                              +--------------+-------------+
                              |tree->AsField()->GetFldObj()|
                              +--------------+-------------+

            We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):

                                  +--------------------+
                                  |   GT_IND/GT_OBJ    |   tree
                                  +---------+----------+
                                            |
                                            |
                                  +---------+----------+
                                  |       GT_ADD       |   addr
                                  +---------+----------+
                                            |
                                          /   \
                                        /       \
                                      /           \
                     +-------------------+  +----------------------+
                     |       objRef      |  |       fldOffset      |
                     |                   |  | (when fldOffset !=0) |
                     +-------------------+  +----------------------+

            or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):

                                  +--------------------+
                                  |   GT_IND/GT_OBJ    |   tree
                                  +----------+---------+
                                             |
                                  +----------+---------+
                                  |      GT_COMMA      |   comma2
                                  +----------+---------+
                                             |
                                            / \
                                          /     \
                                        /         \
                                      /             \
                 +---------+----------+               +---------+----------+
           comma |      GT_COMMA      |               |  "+" (i.e. GT_ADD) |   addr
                 +---------+----------+               +---------+----------+
                           |                                     |
                         /   \                                  /  \
                       /       \                              /      \
                     /           \                          /          \
              +-----+-----+   +-----+-----+          +---------+   +-----------+
          asg |  GT_ASG   |   |  GT_IND   | ind      |  tmpLcl |   | fldOffset |
              +-----+-----+   +-----+-----+          +---------+   +-----------+
                    |               |
                   / \              |
                 /     \            |
               /         \          |
        +-----+-----+  +-----+-----+
        |   tmpLcl  |  |   objRef  |
        +-----------+  +-----------+

        */

        var_types objRefType = objRef->TypeGet();

        GenTree* comma = nullptr;

        // NULL mac means we encounter the GT_FIELD first.  This denotes a dereference of the field,
        // and thus is equivalent to a MACK_Ind with zero offset.
        if (mac == nullptr)
        {
            mac = &defMAC;
        }

        // This flag is set to enable the "conservative" style of explicit null-check insertion.
        // This means that we insert an explicit null check whenever we create byref by adding a
        // constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately
        // dereferenced). The alternative is "aggressive", which would not insert such checks (for
        // small offsets); in this plan, we would transfer some null-checking responsibility to
        // callees of methods taking byref parameters.  They would have to add explicit null checks
        // when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in
        // contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too
        // large).  To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null
        // checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs.
        // This is left here to point out how to implement it.
        CLANG_FORMAT_COMMENT_ANCHOR;

#define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1

        bool addExplicitNullCheck = false;

        // Implicit byref locals and string literals are never null.
        if (fgAddrCouldBeNull(objRef))
        {
            // If the objRef is a GT_ADDR node, it, itself, never requires null checking.  The expression
            // whose address is being taken is either a local or static variable, whose address is necessarily
            // non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
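            // (Illustrative summary) mac->m_kind records how the field address is consumed: MACK_Ind
            // means it feeds an indirection, while MACK_Addr means the bare address escapes, so a byref
            // derived here may outlive this point and is null-checked more conservatively below.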
            if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind))
            {
                if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset))
                {
                    addExplicitNullCheck = true;
                }
                else
                {
                    // In R2R mode the field offset for some fields may change when the code
                    // is loaded. So we can't rely on a zero offset here to suppress the null check.
                    //
                    // See GitHub issue #16454.
                    bool fieldHasChangeableOffset = false;

#ifdef FEATURE_READYTORUN
                    fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr);
#endif

#if CONSERVATIVE_NULL_CHECK_BYREF_CREATION
                    addExplicitNullCheck = (mac->m_kind == MACK_Addr) &&
                                           ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset);
#else
                    addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr &&
                                            ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset));
#endif
                }
            }
        }

        if (addExplicitNullCheck)
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("Before explicit null check morphing:\n");
                gtDispTree(tree);
            }
#endif

            //
            // Create the "comma" subtree
            //
            GenTree* asg = nullptr;
            GenTree* nullchk;

            unsigned lclNum;

            if (objRef->gtOper != GT_LCL_VAR)
            {
                lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet()));

                // Create the "asg" node
                asg = gtNewTempAssign(lclNum, objRef);
            }
            else
            {
                lclNum = objRef->AsLclVarCommon()->GetLclNum();
            }

            GenTree* lclVar = gtNewLclvNode(lclNum, objRefType);
            nullchk         = gtNewNullCheck(lclVar, compCurBB);

            nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections

            if (asg)
            {
                // Create the "comma" node.
                comma = gtNewOperNode(GT_COMMA,
                                      TYP_VOID, // We don't want to return anything from this "comma" node.
                                                // Set the type to TYP_VOID, so we can select "cmp" instruction
                                                // instead of "mov" instruction later on.
                                      asg, nullchk);
            }
            else
            {
                comma = nullchk;
            }

            addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
        }
        else
        {
            addr = objRef;
        }

#ifdef FEATURE_READYTORUN
        if (tree->AsField()->gtFieldLookup.addr != nullptr)
        {
            GenTree* offsetNode = nullptr;
            if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE)
            {
                offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr,
                                                      GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
                offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd;
#endif
            }
            else
            {
                noway_assert(!"unexpected accessType for R2R field access");
            }

            var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF;
            addr              = gtNewOperNode(GT_ADD, addType, addr, offsetNode);
        }
#endif

        if (fldOffset != 0)
        {
            // Generate the "addr" node.
            /* Add the member offset to the object's address */
            FieldSeqNode* fieldSeq =
                fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
            addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr,
                                 gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
        }

        // Now let's set the "tree" as a GT_IND tree.

        tree->SetOper(GT_IND);
        tree->AsOp()->gtOp1 = addr;

        tree->SetIndirExceptionFlags(this);

        if (addExplicitNullCheck)
        {
            //
            // Create "comma2" node and link it to "tree".
            //
            GenTree* comma2;
            comma2 = gtNewOperNode(GT_COMMA,
                                   addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
                                   comma, addr);
            tree->AsOp()->gtOp1 = comma2;
        }

#ifdef DEBUG
        if (verbose)
        {
            if (addExplicitNullCheck)
            {
                printf("After adding explicit null check:\n");
                gtDispTree(tree);
            }
        }
#endif
    }
    else /* This is a static data member */
    {
        if (tree->gtFlags & GTF_IND_TLS_REF)
        {
            // Thread Local Storage static field reference
            //
            // Field ref is a TLS 'Thread-Local-Storage' reference
            //
            // Build this tree:  IND(*) #
            //                    |
            //                   ADD(I_IMPL)
            //                   / \.
            //                  /  CNS(fldOffset)
            //                 /
            //                /
            //               /
            //  IND(I_IMPL) == [Base of this DLL's TLS]
            //   |
            //  ADD(I_IMPL)
            //   / \.
            //  /   CNS(IdValue*4) or MUL
            // /      / \.
            // IND(I_IMPL)  / CNS(4)
            //  |          /
            //  CNS(TLS_HDL,0x2C)  IND
            //                      |
            //                     CNS(pIdAddr)
            //
            //  # Denotes the original node
            //
            void**   pIdAddr = nullptr;
            unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);

            //
            // If we can we access the TLS DLL index ID value directly
            // then pIdAddr will be NULL and
            //      IdValue will be the actual TLS DLL index ID
            //
            GenTree* dllRef = nullptr;
            if (pIdAddr == nullptr)
            {
                if (IdValue != 0)
                {
                    dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
                }
            }
            else
            {
                dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true);

                // Next we multiply by 4
                dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
            }

#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides

            // Mark this ICON as a TLS_HDL, codegen will use FS:[cns]
            GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL);

            // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS
            if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0)
            {
                tree->gtFlags &= ~GTF_FLD_INITCLASS;
                tlsRef->gtFlags |= GTF_ICON_INITCLASS;
            }

            tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);

            if (dllRef != nullptr)
            {
                /* Add the dllRef */
                tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
            }

            /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
            tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);

            if (fldOffset != 0)
            {
                FieldSeqNode* fieldSeq =
                    fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
                GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);

                /* Add the TLS static field offset to the address */
                tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
            }

            // Final indirect to get to actual value of TLS static field

            tree->SetOper(GT_IND);
            tree->AsOp()->gtOp1 = tlsRef;

            noway_assert(tree->gtFlags & GTF_IND_TLS_REF);
        }
        else
        {
            assert(!fieldMayOverlap);

            // Normal static field reference
            //
            // If we can we access the static's address directly
            // then pFldAddr will be NULL and
            //      fldAddr will be the actual address of the static field
            //
            void** pFldAddr = nullptr;
            void*  fldAddr  = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);

            // We should always be able to access this static field address directly
            //
            assert(pFldAddr == nullptr);

            // For boxed statics, this direct address will be for the box. We have already added
            // the indirection for the field itself and attached the sequence, in importation.
            bool          isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd);
            FieldSeqNode* fldSeq =
                !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(symHnd) : FieldSeqStore::NotAField();

            // TODO-CQ: enable this optimization for 32 bit targets.
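            // (Illustrative) For a boxed static, the handle created below addresses the box itself;
            // since the box is never null and is logically invariant, the resulting IND can be marked
            // invariant and non-null, while the field-level indirection was already added in importation.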
            bool isStaticReadOnlyInited = false;
#ifdef TARGET_64BIT
            if (tree->TypeIs(TYP_REF) && !isBoxedStatic)
            {
                bool pIsSpeculative = true;
                if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE)
                {
                    isStaticReadOnlyInited = !pIsSpeculative;
                }
            }
#endif // TARGET_64BIT

            // TODO: choices made below have mostly historical reasons and
            // should be unified to always use the IND(<address>) form.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
            bool preferIndir =
                isBoxedStatic || isStaticReadOnlyInited || (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr));
#else // !TARGET_64BIT
            bool preferIndir = isBoxedStatic;
#endif // !TARGET_64BIT

            if (preferIndir)
            {
                GenTreeFlags handleKind = GTF_EMPTY;
                if (isBoxedStatic)
                {
                    handleKind = GTF_ICON_STATIC_BOX_PTR;
                }
                else if (isStaticReadOnlyInited)
                {
                    handleKind = GTF_ICON_CONST_PTR;
                }
                else
                {
                    handleKind = GTF_ICON_STATIC_HDL;
                }
                GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fldSeq);

                // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to.
                if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited)
                {
                    tree->gtFlags &= ~GTF_FLD_INITCLASS;
                    addr->gtFlags |= GTF_ICON_INITCLASS;
                }

                tree->SetOper(GT_IND);
                tree->AsOp()->gtOp1 = addr;

                if (isBoxedStatic)
                {
                    // The box for the static cannot be null, and is logically invariant, since it
                    // represents (a base for) the static's address.
                    tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
                }
                else if (isStaticReadOnlyInited)
                {
                    JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd));

                    // Static readonly field is not null at this point (see getStaticFieldCurrentClass impl).
                    tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
                }

                return fgMorphSmpOp(tree);
            }
            else
            {
                // Only volatile or classinit could be set, and they map over
                noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0);
                static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE);
                static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS);
                tree->SetOper(GT_CLS_VAR);
                tree->AsClsVar()->gtClsVarHnd = symHnd;
                tree->AsClsVar()->gtFieldSeq  = fldSeq;
            }

            return tree;
        }
    }
    noway_assert(tree->gtOper == GT_IND);

    if (fldOffset == 0)
    {
        GenTree* addr = tree->AsOp()->gtOp1;

        // 'addr' may be a GT_COMMA. Skip over any comma nodes
        addr = addr->gtEffectiveVal();

#ifdef DEBUG
        if (verbose)
        {
            printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n");
            gtDispTree(tree);
        }
#endif

        // We expect 'addr' to be an address at this point.
        assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);

        // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
        FieldSeqNode* fieldSeq =
            fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
        fgAddFieldSeqForZeroOffset(addr, fieldSeq);
    }

    // Pass down the current mac; if non null we are computing an address
    GenTree* result = fgMorphSmpOp(tree, mac);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
        gtDispTree(result);
    }
#endif

    return result;
}

//------------------------------------------------------------------------------
// fgMorphCallInline: attempt to inline a call
//
// Arguments:
//    call         - call expression to inline, inline candidate
//    inlineResult - result tracking and reporting
//
// Notes:
//    Attempts to inline the call.
//
//    If successful, callee's IR is inserted in place of the call, and
//    is marked with an InlineContext.
//
//    If unsuccessful, the transformations done in anticipation of a
//    possible inline are undone, and the candidate flag on the call
//    is cleared.
//
void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
{
    bool inliningFailed = false;

    // Is this call an inline candidate?
    if (call->IsInlineCandidate())
    {
        InlineContext* createdContext = nullptr;
        // Attempt the inline
        fgMorphCallInlineHelper(call, inlineResult, &createdContext);

        // We should have made up our minds one way or another....
        assert(inlineResult->IsDecided());

        // If we failed to inline, we have a bit of work to do to cleanup
        if (inlineResult->IsFailure())
        {
            if (createdContext != nullptr)
            {
                // We created a context before we got to the failure, so mark
                // it as failed in the tree.
                createdContext->SetFailed(inlineResult);
            }
            else
            {
#ifdef DEBUG
                // In debug we always put all inline attempts into the inline tree.
                InlineContext* ctx =
                    m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call);
                ctx->SetFailed(inlineResult);
#endif
            }

            inliningFailed = true;

            // Clear the Inline Candidate flag so we can ensure later we tried
            // inlining all candidates.
            //
            call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE;
        }
    }
    else
    {
        // This wasn't an inline candidate. So it must be a GDV candidate.
        assert(call->IsGuardedDevirtualizationCandidate());

        // We already know we can't inline this call, so don't even bother to try.
        inliningFailed = true;
    }

    // If we failed to inline (or didn't even try), do some cleanup.
    if (inliningFailed)
    {
        if (call->gtReturnType != TYP_VOID)
        {
            JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID());

            // Detach the GT_CALL tree from the original statement by
            // hanging a "nothing" node to it. Later the "nothing" node will be removed
            // and the original GT_CALL tree will be picked up by the GT_RET_EXPR node.
            noway_assert(fgMorphStmt->GetRootNode() == call);
            fgMorphStmt->SetRootNode(gtNewNothingNode());
        }
    }
}

//------------------------------------------------------------------------------
// fgMorphCallInlineHelper: Helper to attempt to inline a call
//
// Arguments:
//    call           - call expression to inline, inline candidate
//    result         - result to set to success or failure
//    createdContext - The context that was created if the inline attempt got to the inliner.
//
// Notes:
//    Attempts to inline the call.
//
//    If successful, callee's IR is inserted in place of the call, and
//    is marked with an InlineContext.
//
//    If unsuccessful, the transformations done in anticipation of a
//    possible inline are undone, and the candidate flag on the call
//    is cleared.
//
//    If a context was created because we got to the importer then it is output by this function.
//    If the inline succeeded, this context will already be marked as successful. If it failed and
//    a context is returned, then it will not have been marked as success or failed.
//
void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext)
{
    // Don't expect any surprises here.
    assert(result->IsCandidate());

    if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING)
    {
        // For now, attributing this to call site, though it's really
        // more of a budget issue (lvaCount currently includes all
        // caller and prospective callee locals). We still might be
        // able to inline other callees into this caller, or inline
        // this callee in other callers.
        result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
        return;
    }

    if (call->IsVirtual())
    {
        result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL);
        return;
    }

    // Re-check this because guarded devirtualization may allow these through.
    if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
    {
        result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
        return;
    }

    // impMarkInlineCandidate() is expected not to mark tail prefixed calls
    // and recursive tail calls as inline candidates.
    noway_assert(!call->IsTailPrefixedCall());
    noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call));

    //
    // Calling inlinee's compiler to inline the method.
    //

    unsigned startVars = lvaCount;

#ifdef DEBUG
    if (verbose)
    {
        printf("Expanding INLINE_CANDIDATE in statement ");
        printStmtID(fgMorphStmt);
        printf(" in " FMT_BB ":\n", compCurBB->bbNum);
        gtDispStmt(fgMorphStmt);
        if (call->IsImplicitTailCall())
        {
            printf("Note: candidate is implicit tail call\n");
        }
    }
#endif

    impInlineRoot()->m_inlineStrategy->NoteAttempt(result);

    //
    // Invoke the compiler to inline the call.
    //

    fgInvokeInlineeCompiler(call, result, createdContext);

    if (result->IsFailure())
    {
        // Undo some changes made in anticipation of inlining...

        // Zero out the used locals
        memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
        for (unsigned i = startVars; i < lvaCount; i++)
        {
            new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
        }

        lvaCount = startVars;

#ifdef DEBUG
        if (verbose)
        {
            // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
        }
#endif

        return;
    }

#ifdef DEBUG
    if (verbose)
    {
        // printf("After inlining lvaCount=%d.\n", lvaCount);
    }
#endif
}

//------------------------------------------------------------------------
// fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp.
//
// Arguments:
//    callee - The callee to check
//    failReason - If this method returns false, the reason why. Can be nullptr.
//
// Return Value:
//    Returns true or false based on whether the callee can be fastTailCalled
//
// Notes:
//    This function is target specific and each target will make the fastTailCall
//    decision differently. See the notes below.
//
//    This function calls fgInitArgInfo() to initialize the arg info table, which
//    is used to analyze the argument. This function can alter the call arguments
//    by adding argument IR nodes for non-standard arguments.
//
//    Windows Amd64:
//       A fast tail call can be made whenever the number of callee arguments
//       is less than or equal to the number of caller arguments, or we have four
//       or fewer callee arguments. This is because, on Windows AMD64, each
//       argument uses exactly one register or one 8-byte stack slot. Thus, we only
//       need to count arguments, and not be concerned with the size of each
//       incoming or outgoing argument.
//
//    Can fast tail call examples (amd64 Windows):
//
//        -- Callee will have all register arguments --
//        caller(int, int, int, int)
//        callee(int, int, float, int)
//
//        -- Callee requires stack space that is equal or less than the caller --
//        caller(struct, struct, struct, struct, struct, struct)
//        callee(int, int, int, int, int, int)
//
//        -- Callee requires stack space that is less than the caller --
//        caller(struct, double, struct, float, struct, struct)
//        callee(int, int, int, int, int)
//
//        -- Callee will have all register arguments --
//        caller(int)
//        callee(int, int, int, int)
//
//    Cannot fast tail call examples (amd64 Windows):
//
//        -- Callee requires stack space that is larger than the caller --
//        caller(struct, double, struct, float, struct, struct)
//        callee(int, int, int, int, int, double, double, double)
//
//        -- Callee has a byref struct argument --
//        caller(int, int, int)
//        callee(struct(size 3 bytes))
//
//    Unix Amd64 && Arm64:
//       A fastTailCall decision can be made whenever the callee's stack space is
//       less than or equal to the caller's stack space. There are many permutations
//       of when the caller and callee have different stack sizes if there are
//       structs being passed to either the caller or callee.
//
//    Exceptions:
//       If the callee has a 9 to 16 byte struct argument and the callee has
//       stack arguments, the decision will be to not fast tail call. This is
//       because before fgMorphArgs is done, it is unknown whether the struct
//       will be placed on the stack or enregistered. Therefore, the conservative
//       decision of do not fast tail call is taken. This limitation should be
//       removed if/when fgMorphArgs no longer depends on fgCanFastTailCall.
//
//    Can fast tail call examples (amd64 Unix):
//
//        -- Callee will have all register arguments --
//        caller(int, int, int, int)
//        callee(int, int, float, int)
//
//        -- Callee requires stack space that is equal to the caller --
//        caller({ long, long }, { int, int }, { int }, { int }, { int }, { int })
//            -- 6 int register arguments, 16 byte stack space
//        callee(int, int, int, int, int, int, int, int)
//            -- 6 int register arguments, 16 byte stack space
//
//        -- Callee requires stack space that is less than the caller --
//        caller({ long, long }, int, { long, long }, int, { long, long }, { long, long })
//            -- 6 int register arguments, 32 byte stack space
//        callee(int, int, int, int, int, int, { long, long })
//            -- 6 int register arguments, 16 byte stack space
//
//        -- Callee will have all register arguments --
//        caller(int)
//        callee(int, int, int, int)
//
//    Cannot fast tail call examples (amd64 Unix):
//
//        -- Callee requires stack space that is larger than the caller --
//        caller(float, float, float, float, float, float, float, float) -- 8 float register arguments
//        callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
//        -- Callee has structs which cannot be enregistered (Implementation Limitation) --
//        caller(float, float, float, float, float, float, float, float, { double, double, double })
//            -- 8 float register arguments, 24 byte stack space
//        callee({ double, double, double }) -- 24 bytes stack space
//
//        -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation
//           Limitation) --
//        caller(int, int, int, int, int, int, { double, double, double })
//            -- 6 int register arguments, 24 byte stack space
//        callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space
//
//        -- Caller requires stack space and nCalleeArgs > nCallerArgs (Bug) --
//        caller({ double, double, double, double, double, double }) // 48 byte stack
//        callee(int, int) -- 2 int registers
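//
//    (Illustrative) These rules are encoded below by computing calleeArgStackSize from the arg
//    table and comparing it against the caller's incoming arg area size (info.compArgStackSize).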
//
bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
{
#if FEATURE_FASTTAILCALL

    // To reach here means that the return types of the caller and callee are tail call compatible.
    // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
    if (callee->IsTailPrefixedCall())
    {
        var_types retType = info.compRetType;
        assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv,
                                            (var_types)callee->gtReturnType, callee->gtRetClsHnd,
                                            callee->GetUnmanagedCallConv()));
    }
#endif

    assert(!callee->AreArgsComplete());

    fgInitArgInfo(callee);

    fgArgInfo* argInfo = callee->fgArgInfo;

    unsigned calleeArgStackSize = 0;
    unsigned callerArgStackSize = info.compArgStackSize;

    for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
    {
        fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);

        calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment());
        calleeArgStackSize += arg->GetStackByteSize();
    }
    calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize);

    auto reportFastTailCallDecision = [&](const char* thisFailReason) {
        if (failReason != nullptr)
        {
            *failReason = thisFailReason;
        }

#ifdef DEBUG
        if ((JitConfig.JitReportFastTailCallDecisions()) == 1)
        {
            if (callee->gtCallType != CT_INDIRECT)
            {
                const char* methodName;

                methodName = eeGetMethodFullName(callee->gtCallMethHnd);

                printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ",
                       info.compFullName, methodName);
            }
            else
            {
                printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- "
                       "Decision: ",
                       info.compFullName);
            }

            if (thisFailReason == nullptr)
            {
                printf("Will fast tailcall");
            }
            else
            {
                printf("Will not fast tailcall (%s)", thisFailReason);
            }

            printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize);
        }
        else
        {
            if (thisFailReason == nullptr)
            {
                JITDUMP("[Fast tailcall decision]: Will fast tailcall\n");
            }
            else
            {
                JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason);
            }
        }
#endif // DEBUG
    };

    if (!opts.compFastTailCalls)
    {
        reportFastTailCallDecision("Configuration doesn't allow fast tail calls");
        return false;
    }

    if (callee->IsStressTailCall())
    {
        reportFastTailCallDecision("Fast tail calls are not performed under tail call stress");
        return false;
    }

    // Note on vararg methods:
    // If the caller is vararg method, we don't know the number of arguments passed by caller's caller.
    // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its
    // fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as
    // out-going area required for callee is bounded by caller's fixed argument space.
    //
    // Note that callee being a vararg method is not a problem since we can account the params being passed.
    //
    // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg
    // method. This is due to the ABI differences for native vararg methods for these platforms. There is
    // work required to shuffle arguments to the correct locations.
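    // For example (illustrative): a method declared with __arglist on Windows arm64 is rejected just
    // below, and the call is dispatched as a regular call or via the tailcall helper instead.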
    CLANG_FORMAT_COMMENT_ANCHOR;

    if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs()))
    {
        reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64");
        return false;
    }

    if (compLocallocUsed)
    {
        reportFastTailCallDecision("Localloc used");
        return false;
    }

#ifdef TARGET_AMD64
    // Needed for Jit64 compat.
    // In future, enabling fast tail calls from methods that need GS cookie
    // check would require codegen side work to emit GS cookie check before a
    // tail call.
    if (getNeedsGSSecurityCookie())
    {
        reportFastTailCallDecision("GS Security cookie check required");
        return false;
    }
#endif

    // If the NextCallReturnAddress intrinsic is used we should do normal calls.
    if (info.compHasNextCallRetAddr)
    {
        reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic");
        return false;
    }

    if (callee->HasRetBufArg()) // RetBuf
    {
        // If callee has RetBuf param, caller too must have it.
        // Otherwise go the slow route.
        if (info.compRetBuffArg == BAD_VAR_NUM)
        {
            reportFastTailCallDecision("Callee has RetBuf but caller does not.");
            return false;
        }
    }

    // For a fast tail call the caller will use its incoming arg stack space to place
    // arguments, so if the callee requires more arg stack space than is available here
    // the fast tail call cannot be performed. This is common to all platforms.
    // Note that the GC'ness of on stack args need not match since the arg setup area is marked
    // as non-interruptible for fast tail calls.
    if (calleeArgStackSize > callerArgStackSize)
    {
        reportFastTailCallDecision("Not enough incoming arg space");
        return false;
    }

    // For Windows some struct parameters are copied on the local frame
    // and then passed by reference. We cannot fast tail call in these situations
    // as we need to keep our frame around.
    if (fgCallHasMustCopyByrefParameter(callee))
    {
        reportFastTailCallDecision("Callee has a byref parameter");
        return false;
    }

    reportFastTailCallDecision(nullptr);
    return true;
#else // FEATURE_FASTTAILCALL
    if (failReason)
        *failReason = "Fast tailcalls are not supported on this platform";
    return false;
#endif
}

//------------------------------------------------------------------------
// fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that
//                                  requires a struct copy in the caller.
//
// Arguments:
//    callee - The callee to check
//
// Return Value:
//    Returns true or false based on whether this call has a byref parameter that
//    requires a struct copy in the caller.
#if FEATURE_FASTTAILCALL
bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee)
{
    fgArgInfo* argInfo = callee->fgArgInfo;

    bool hasMustCopyByrefParameter = false;

    for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
    {
        fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);

        if (arg->isStruct)
        {
            if (arg->passedByRef)
            {
                // Generally a byref arg will block tail calling, as we have to
                // make a local copy of the struct for the callee.
                hasMustCopyByrefParameter = true;

                // If we're optimizing, we may be able to pass our caller's byref to our callee,
                // and so still be able to avoid a struct copy.
                if (opts.OptimizationEnabled())
                {
                    // First, see if this arg is an implicit byref param.
                    GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this);

                    if (lcl != nullptr)
                    {
                        // Yes, the arg is an implicit byref param.
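                        // (Background, illustrative) Implicit byref params arise on ABIs such as
                        // Windows x64/arm64, where larger structs are passed as a pointer to a
                        // caller-allocated copy rather than by value.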
const unsigned lclNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lcl); // The param must not be promoted; if we've promoted, then the arg will be // a local struct assembled from the promoted fields. if (varDsc->lvPromoted) { JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n", dspTreeID(arg->GetNode()), lclNum); } else { JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n", dspTreeID(arg->GetNode()), lclNum); // We have to worry about introducing aliases if we bypass copying // the struct at the call. We'll do some limited analysis to see if we // can rule this out. const unsigned argLimit = 6; // If this is the only appearance of the byref in the method, then // aliasing is not possible. // // If no other call arg refers to this byref, and no other arg is // a pointer which could refer to this byref, we can optimize. // // We only check this for calls with small numbers of arguments, // as the analysis cost will be quadratic. // const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); if (totalAppearances == 1) { JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum); hasMustCopyByrefParameter = false; } else if (totalAppearances > callAppearances) { // lvRefCntWtd tracks the number of appearances of the arg at call sites. // If this number doesn't match the regular ref count, there is // a non-call appearance, and we must be conservative. // JITDUMP("... no, arg has %u non-call appearance(s)\n", totalAppearances - callAppearances); } else if (argInfo->ArgCount() <= argLimit) { JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n" "... Running alias analysis on this call's args\n", totalAppearances); GenTree* interferingArg = nullptr; for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2) { if (index2 == index) { continue; } fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false); JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode())); DISPTREE(arg2->GetNode()); // Do we pass 'lcl' more than once to the callee? if (arg2->isStruct && arg2->passedByRef) { GenTreeLclVarCommon* const lcl2 = arg2->GetNode()->IsImplicitByrefParameterValue(this); if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum())) { // not copying would introduce aliased implicit byref structs // in the callee ... we can't optimize. interferingArg = arg2->GetNode(); break; } else { JITDUMP("... arg refers to different implicit byref V%02u\n", lcl2->GetLclNum()); continue; } } // Do we pass a byref pointer which might point within 'lcl'? // // We can assume the 'lcl' is unaliased on entry to the // method, so the only way we can have an aliasing byref pointer at // the call is if 'lcl' is address taken/exposed in the method. // // Note even though 'lcl' is not promoted, we are in the middle // of the promote->rewrite->undo->(morph)->demote cycle, and so // might see references to promoted fields of 'lcl' that haven't yet // been demoted (see fgMarkDemotedImplicitByRefArgs). // // So, we also need to scan all 'lcl's fields, if any, to see if they // are exposed. // // When looking for aliases from other args, we check for both TYP_BYREF // and TYP_I_IMPL typed args here. 
Conceptually anything that points into // an implicit byref parameter should be TYP_BYREF, as these parameters could // refer to boxed heap locations (say if the method is invoked by reflection) // but there are some stack only structs (like typed references) where // the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will // transiently retype all simple address-of implicit parameter args as // TYP_I_IMPL. // if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL)) { JITDUMP("...arg is a byref, must run an alias check\n"); bool checkExposure = true; bool hasExposure = false; // See if there is any way arg could refer to a parameter struct. GenTree* arg2Node = arg2->GetNode(); if (arg2Node->OperIs(GT_LCL_VAR)) { GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon(); assert(arg2LclNode->GetLclNum() != lclNum); LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode); // Other params can't alias implicit byref params if (arg2Dsc->lvIsParam) { checkExposure = false; } } // Because we're checking TYP_I_IMPL above, at least // screen out obvious things that can't cause aliases. else if (arg2Node->IsIntegralConst()) { checkExposure = false; } if (checkExposure) { JITDUMP( "... not sure where byref arg points, checking if V%02u is exposed\n", lclNum); // arg2 might alias arg, see if we've exposed // arg somewhere in the method. if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed()) { // Struct as a whole is exposed, can't optimize JITDUMP("... V%02u is exposed\n", lclNum); hasExposure = true; } else if (varDsc->lvFieldLclStart != 0) { // This is the promoted/undone struct case. // // The field start is actually the local number of the promoted local, // use it to enumerate the fields. const unsigned promotedLcl = varDsc->lvFieldLclStart; LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl); JITDUMP("...promoted-unpromoted case -- also checking exposure of " "fields of V%02u\n", promotedLcl); for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt; fieldIndex++) { LclVarDsc* fieldDsc = lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex); if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed()) { // Promoted and not yet demoted field is exposed, can't optimize JITDUMP("... field V%02u is exposed\n", promotedVarDsc->lvFieldLclStart + fieldIndex); hasExposure = true; break; } } } } if (hasExposure) { interferingArg = arg2->GetNode(); break; } } else { JITDUMP("...arg is not a byref or implicit byref (%s)\n", varTypeName(arg2->GetNode()->TypeGet())); } } if (interferingArg != nullptr) { JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg), lclNum); } else { JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum); hasMustCopyByrefParameter = false; } } else { JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n", argInfo->ArgCount(), argLimit); } } } } if (hasMustCopyByrefParameter) { // This arg requires a struct copy. No reason to keep scanning the remaining args. break; } } } } return hasMustCopyByrefParameter; } #endif //------------------------------------------------------------------------ // fgMorphPotentialTailCall: Attempt to morph a call that the importer has // identified as a potential tailcall to an actual tailcall and return the // placeholder node to use in this case. // // Arguments: // call - The call to morph. // // Return Value: // Returns a node to use if the call was morphed into a tailcall. 
If this // function returns a node the call is done being morphed and the new node // should be used. Otherwise the call will have been demoted to a regular call // and should go through normal morph. // // Notes: // This is called only for calls that the importer has already identified as // potential tailcalls. It will do profitability and legality checks and // classify which kind of tailcall we are able to (or should) do, along with // modifying the trees to perform that kind of tailcall. // GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // It should either be an explicit (i.e. tail prefixed) or an implicit tail call assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall()); // It cannot be an inline candidate assert(!call->IsInlineCandidate()); auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) { #ifdef DEBUG if (verbose) { printf("\nRejecting tail call in morph for call "); printTreeID(call); printf(": %s", reason); if (lclNum != BAD_VAR_NUM) { printf(" V%02u", lclNum); } printf("\n"); } #endif // for non user funcs, we have no handles to report info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), TAILCALL_FAIL, reason); // We have checked the candidate so demote. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif }; if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { failTailCall("Might turn into an intrinsic"); return nullptr; } if (call->IsNoReturn() && !call->IsTailPrefixedCall()) { // Such tail calls always throw an exception and we won't be able to see current // Caller() in the stacktrace. failTailCall("Never returns"); return nullptr; } #ifdef DEBUG if (opts.compGcChecks && (info.compRetType == TYP_REF)) { failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, " "invalidating tailcall opportunity"); return nullptr; } #endif // We have to ensure to pass the incoming retValBuf as the // outgoing one. Using a temp will not do as this function will // not regain control to do the copy. This can happen when inlining // a tailcall which also has a potential tailcall in it: the IL looks // like we can do a tailcall, but the trees generated use a temp for the inlinee's // result. TODO-CQ: Fix this. if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(call->TypeGet() == TYP_VOID); GenTree* retValBuf = call->gtCallArgs->GetNode(); if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg) { failTailCall("Need to copy return buffer"); return nullptr; } } // We are still not sure whether it can be a tail call. Because, when converting // a call to an implicit tail call, we must check that there are no locals with // their address taken. If this is the case, we have to assume that the address // has been leaked and the current stack frame must live until after the final // call. // Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note // that lvHasLdAddrOp is much more conservative. We cannot just base it on // IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs // during morph stage. The reason for also checking IsAddressExposed() is that in case // of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp. 
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us // never to be incorrect. // // TODO-Throughput: have a compiler level flag to indicate whether method has vars whose // address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed() // is set. This avoids the need for iterating through all lcl vars of the current // method. Right now throughout the code base we are not consistently using 'set' // method to set lvHasLdAddrOp and IsAddressExposed() flags. bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall(); if (isImplicitOrStressTailCall && compLocallocUsed) { failTailCall("Localloc used"); return nullptr; } bool hasStructParam = false; for (unsigned varNum = 0; varNum < lvaCount; varNum++) { LclVarDsc* varDsc = lvaGetDesc(varNum); // If the method is marked as an explicit tail call we will skip the // following three hazard checks. // We still must check for any struct parameters and set 'hasStructParam' // so that we won't transform the recursive tail call into a loop. // if (isImplicitOrStressTailCall) { if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Local address taken", varNum); return nullptr; } if (varDsc->IsAddressExposed()) { if (lvaIsImplicitByRefLocal(varNum)) { // The address of the implicit-byref is a non-address use of the pointer parameter. } else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) { // The address of the implicit-byref's field is likewise a non-address use of the pointer // parameter. } else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum)) { // This temp was used for struct promotion bookkeeping. It will not be used, and will have // its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs. assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl)); assert(fgGlobalMorph); } else { failTailCall("Local address taken", varNum); return nullptr; } } if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Has Struct Promoted Param", varNum); return nullptr; } if (varDsc->lvPinned) { // A tail call removes the method from the stack, which means the pinning // goes away for the callee. We can't allow that. failTailCall("Has Pinned Vars", varNum); return nullptr; } } if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam) { hasStructParam = true; // This prevents transforming a recursive tail call into a loop // but doesn't prevent tail call optimization so we need to // look at the rest of parameters. } } if (!fgCheckStmtAfterTailCall()) { failTailCall("Unexpected statements after the tail call"); return nullptr; } const char* failReason = nullptr; bool canFastTailCall = fgCanFastTailCall(call, &failReason); CORINFO_TAILCALL_HELPERS tailCallHelpers; bool tailCallViaJitHelper = false; if (!canFastTailCall) { if (call->IsImplicitTailCall()) { // Implicit or opportunistic tail calls are always dispatched via fast tail call // mechanism and never via tail call helper for perf. failTailCall(failReason); return nullptr; } assert(call->IsTailPrefixedCall()); assert(call->tailCallInfo != nullptr); // We do not currently handle non-standard args except for VSD stubs. 
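// Illustrative summary: at this point the call is known to be an explicit
// (tail. prefixed) call that failed the fast-path check, so one of the two
// slow mechanisms is chosen below: the x86-specific JIT_TailCall helper, or
// the portable StoreArgs/Dispatcher helper pair requested from the runtime.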
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this)) { failTailCall( "Method with non-standard args passed in callee trash register cannot be tail called via helper"); return nullptr; } // On x86 we have a faster mechanism than the general one which we use // in almost all cases. See fgCanTailCallViaJitHelper for more information. if (fgCanTailCallViaJitHelper()) { tailCallViaJitHelper = true; } else { // Make sure we can get the helpers. We do this last as the runtime // will likely be required to generate these. CORINFO_RESOLVED_TOKEN* token = nullptr; CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig(); unsigned flags = 0; if (!call->tailCallInfo->IsCalli()) { token = call->tailCallInfo->GetToken(); if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_TAILCALL_IS_CALLVIRT; } } if (call->gtCallThisArg != nullptr) { var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet(); if (thisArgType != TYP_REF) { flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF; } } if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags, &tailCallHelpers)) { failTailCall("Tail call help not available"); return nullptr; } } } // Check if we can make the tailcall a loop. bool fastTailCallToLoop = false; #if FEATURE_TAILCALL_OPT // TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register // or return type is a struct that can be passed in a register. // // TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through // hidden generic context param or through keep alive thisptr), then transforming a recursive // call to such a method requires that the generic context stored on the stack slot be updated. Right now, // fgMorphRecursiveFastTailCallIntoLoop() is not handling update of the generic context while transforming // a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the // generic type parameters of both caller and callee generic method are the same. if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() && !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet())) { fastTailCallToLoop = true; } #endif // Ok -- now we are committed to performing a tailcall. Report the decision. CorInfoTailCall tailCallResult; if (fastTailCallToLoop) { tailCallResult = TAILCALL_RECURSIVE; } else if (canFastTailCall) { tailCallResult = TAILCALL_OPTIMIZED; } else { tailCallResult = TAILCALL_HELPER; } info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), tailCallResult, nullptr); // Are we currently planning to expand the gtControlExpr as an early virtual call target? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // It isn't always profitable to expand a virtual call early // // We always expand the TAILCALL_HELPER type late. // And we expand late when we have an optimized tail call // and the this pointer needs to be evaluated into a temp. // if (tailCallResult == TAILCALL_HELPER) { // We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work // for us to be able to expand this earlier in morph) // call->ClearExpandedEarly(); } else if ((tailCallResult == TAILCALL_OPTIMIZED) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0)) { // We generate better code when we expand this late in lower instead. // call->ClearExpandedEarly(); } } // Now actually morph the call. compTailCallUsed = true; // This will prevent inlining this call. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL; if (tailCallViaJitHelper) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; } #if FEATURE_TAILCALL_OPT if (fastTailCallToLoop) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP; } #endif // Mark that this is no longer a pending tailcall. We need to do this before // we call fgMorphCall again (which happens in the fast tailcall case) to // avoid recursing back into this method. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif #ifdef DEBUG if (verbose) { printf("\nGTF_CALL_M_TAILCALL bit set for call "); printTreeID(call); printf("\n"); if (fastTailCallToLoop) { printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call "); printTreeID(call); printf("\n"); } } #endif // For R2R we might need a different entry point for this call if we are doing a tailcall. // The reason is that the normal delay load helper uses the return address to find the indirection // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM: // We optimize delegate invocations manually in the JIT so skip this for those. if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke()) { info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint); #ifdef TARGET_XARCH // We have already computed arg info to make the fast tailcall decision, but on X64 we now // have to pass the indirection cell, so redo arg info. call->ResetArgInfo(); #endif } // If this block has a flow successor, make suitable updates. // BasicBlock* const nextBlock = compCurBB->GetUniqueSucc(); if (nextBlock == nullptr) { // No unique successor. compCurBB should be a return. // assert(compCurBB->bbJumpKind == BBJ_RETURN); } else { // Flow no longer reaches nextBlock from here. // fgRemoveRefPred(nextBlock, compCurBB); // Adjust profile weights. // // Note if this is a tail call to loop, further updates // are needed once we install the loop edge. // if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight()) { // Since we have linear flow we can update the next block weight. // weight_t const blockWeight = compCurBB->bbWeight; weight_t const nextWeight = nextBlock->bbWeight; weight_t const newNextWeight = nextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextWeight >= 0) { // Note if we'd already morphed the IR in nextblock we might // have done something profile sensitive that we should arguably reconsider. 
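// (Illustrative, with hypothetical numbers: if compCurBB has weight 30 and
// nextBlock has weight 100, nextBlock is lowered to 70; if the subtraction
// would go negative the weights are simply left untouched, as noted above.)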
// JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum, nextWeight, newNextWeight); nextBlock->setBBProfileWeight(newNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight); } // If nextBlock is not a BBJ_RETURN, it should have a unique successor that // is a BBJ_RETURN, as we allow a little bit of flow after a tail call. // if (nextBlock->bbJumpKind != BBJ_RETURN) { BasicBlock* retBlock = nextBlock->GetUniqueSucc(); // Check if we have a sequence of GT_ASG blocks where the same variable is assigned // to temp locals over and over. // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs. // // { GT_ASG(t_0, GT_CALL(...)) } // { GT_ASG(t_1, t0) } (with casts on rhs potentially) // ... // { GT_ASG(t_n, t_(n - 1)) } // { GT_RET t_n } // if (retBlock->bbJumpKind != BBJ_RETURN) { // Make sure the block has a single statement assert(nextBlock->firstStmt() == nextBlock->lastStmt()); // And the root node is "ASG(LCL_VAR, LCL_VAR)" GenTree* asgNode = nextBlock->firstStmt()->GetRootNode(); assert(asgNode->OperIs(GT_ASG)); unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); while (retBlock->bbJumpKind != BBJ_RETURN) { #ifdef DEBUG Statement* nonEmptyStmt = nullptr; for (Statement* const stmt : retBlock->Statements()) { // Ignore NOP statements if (!stmt->GetRootNode()->OperIs(GT_NOP)) { // Only a single non-NOP statement is allowed assert(nonEmptyStmt == nullptr); nonEmptyStmt = stmt; } } if (nonEmptyStmt != nullptr) { asgNode = nonEmptyStmt->GetRootNode(); if (!asgNode->OperIs(GT_NOP)) { assert(asgNode->OperIs(GT_ASG)); GenTree* rhs = asgNode->gtGetOp2(); while (rhs->OperIs(GT_CAST)) { assert(!rhs->gtOverflow()); rhs = rhs->gtGetOp1(); } assert(lcl == rhs->AsLclVarCommon()->GetLclNum()); lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); } } #endif retBlock = retBlock->GetUniqueSucc(); } } assert(retBlock->bbJumpKind == BBJ_RETURN); if (retBlock->hasProfileWeight()) { // Do similar updates here. // weight_t const nextNextWeight = retBlock->bbWeight; weight_t const newNextNextWeight = nextNextWeight - blockWeight; // If the math would result in an negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextNextWeight >= 0) { JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", retBlock->bbNum, nextNextWeight, newNextNextWeight); retBlock->setBBProfileWeight(newNextNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight); } } } } } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // We enable shared-ret tail call optimization for recursive calls even if // FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined. if (gtIsRecursiveCall(call)) #endif { // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. compCurBB->bbJumpKind = BBJ_RETURN; } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); #ifdef DEBUG // Tail call needs to be in one of the following IR forms // Either a call stmt or // GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..))) // var = GT_CALL(..) 
or var = (GT_CAST(GT_CALL(..))) // GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP) // In the above, // GT_CASTS may be nested. genTreeOps stmtOper = stmtExpr->gtOper; if (stmtOper == GT_CALL) { assert(stmtExpr == call); } else { assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA); GenTree* treeWithCall; if (stmtOper == GT_RETURN) { treeWithCall = stmtExpr->gtGetOp1(); } else if (stmtOper == GT_COMMA) { // Second operation must be nop. assert(stmtExpr->gtGetOp2()->IsNothingNode()); treeWithCall = stmtExpr->gtGetOp1(); } else { treeWithCall = stmtExpr->gtGetOp2(); } // Peel off casts while (treeWithCall->gtOper == GT_CAST) { assert(!treeWithCall->gtOverflow()); treeWithCall = treeWithCall->gtGetOp1(); } assert(treeWithCall == call); } #endif // Store the call type for later to introduce the correct placeholder. var_types origCallType = call->TypeGet(); GenTree* result; if (!canFastTailCall && !tailCallViaJitHelper) { // For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular // calls with (to the JIT) regular control flow so we do not need to do // much special handling. result = fgMorphTailCallViaHelpers(call, tailCallHelpers); } else { // Otherwise we will transform into something that does not return. For // fast tailcalls a "jump" and for tailcall via JIT helper a call to a // JIT helper that does not return. So peel off everything after the // call. Statement* nextMorphStmt = fgMorphStmt->GetNextStmt(); JITDUMP("Remove all stmts after the call.\n"); while (nextMorphStmt != nullptr) { Statement* stmtToRemove = nextMorphStmt; nextMorphStmt = stmtToRemove->GetNextStmt(); fgRemoveStmt(compCurBB, stmtToRemove); } bool isRootReplaced = false; GenTree* root = fgMorphStmt->GetRootNode(); if (root != call) { JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call)); isRootReplaced = true; fgMorphStmt->SetRootNode(call); } // Avoid potential extra work for the return (for example, vzeroupper) call->gtType = TYP_VOID; // The runtime requires that we perform a null check on the `this` argument before // tail calling to a virtual dispatch stub. This requirement is a consequence of limitations // in the runtime's ability to map an AV to a NullReferenceException if // the AV occurs in a dispatch stub that has an unmanaged caller. if (call->IsVirtualStub()) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Do some target-specific transformations (before we process the args, // etc.) for the JIT helper case. if (tailCallViaJitHelper) { fgMorphTailCallViaJitHelper(call); // Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the // argument list, invalidating the argInfo. call->fgArgInfo = nullptr; } // Tail call via JIT helper: The VM can't use return address hijacking // if we're not going to return and the helper doesn't have enough info // to safely poll, so we poll before the tail call, if the block isn't // already safe. Since tail call via helper is a slow mechanism it // doesn't matter whether we emit a GC poll. This is done to be in parity // with Jit64. Also this avoids GC info size increase if almost all // methods are expected to be tail calls (e.g. F#). // // Note that we can avoid emitting GC-poll if we know that the current // BB is dominated by a Gc-SafePoint block. But we don't have dominator // info at this point. One option is to just add a place holder node for // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block // is dominated by a GC-SafePoint.
For now it is not clear whether // optimizing slow tail calls is worth the effort. As a low cost check, // we check whether the first and current basic blocks are // GC-SafePoints. // // Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead, // fgSetBlockOrder() is going to mark the method as fully interruptible // if the block containing this tail call is reachable without executing // any call. BasicBlock* curBlock = compCurBB; if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock)) { // We didn't insert a poll block, so we need to morph the call now // (Normally it will get morphed when we get to the split poll block) GenTree* temp = fgMorphCall(call); noway_assert(temp == call); } // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; } else { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need an epilogue. compCurBB->bbJumpKind = BBJ_THROW; } if (isRootReplaced) { // We have replaced the root node of this stmt and deleted the rest, // but we still have the deleted, dead nodes on the `fgMorph*` stack // if the root node was an `ASG`, `RET` or `CAST`. // Return a zero con node to exit morphing of the old trees without asserts // and prevent POST_ORDER morphing from doing something wrong with our call. var_types callType; if (varTypeIsStruct(origCallType)) { CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference)); if (howToReturnStruct == SPK_ByValue) { callType = TYP_I_IMPL; } else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType)) { callType = TYP_FLOAT; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); } else { callType = origCallType; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); callType = genActualType(callType); GenTree* zero = gtNewZeroConNode(callType); result = fgMorphTree(zero); } else { result = call; } } return result; } //------------------------------------------------------------------------ // fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code // generation. // // Arguments: // call - The call to transform // helpers - The tailcall helpers provided by the runtime. // // Return Value: // Returns the transformed node. // // Notes: // This transforms // GT_CALL // {callTarget} // {this} // {args} // into // GT_COMMA // GT_CALL StoreArgsStub // {callTarget} (depending on flags provided by the runtime) // {this} (as a regular arg) // {args} // GT_COMMA // GT_CALL Dispatcher // GT_ADDR ReturnAddress // {CallTargetStub} // GT_ADDR ReturnValue // GT_LCL ReturnValue // whenever the call node returns a value. If the call node does not return a // value the last comma will not be there. // GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help) { // R2R requires different handling but we don't support tailcall via // helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper assert(!opts.IsReadyToRun()); JITDUMP("fgMorphTailCallViaHelpers (before):\n"); DISPTREE(call); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // We might or might not have called fgInitArgInfo before this point: in // builds with FEATURE_FASTTAILCALL we will have called it when checking if // we could do a fast tailcall, so it is possible we have added extra IR // for non-standard args that we must get rid of. Get rid of that IR here // and do this first as it will 'expose' the retbuf as the first arg, which // we rely upon in fgCreateCallDispatcherAndGetResult. call->ResetArgInfo(); GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher); // Change the call to a call to the StoreArgs stub. if (call->HasRetBufArg()) { JITDUMP("Removing retbuf"); call->gtCallArgs = call->gtCallArgs->GetNext(); call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG; } const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0; GenTree* doBeforeStoreArgsStub = nullptr; GenTree* thisPtrStubArg = nullptr; // Put 'this' in normal param list if (call->gtCallThisArg != nullptr) { JITDUMP("Moving this pointer into arg list\n"); GenTree* objp = call->gtCallThisArg->GetNode(); GenTree* thisPtr = nullptr; call->gtCallThisArg = nullptr; // JIT will need one or two copies of "this" in the following cases: // 1) the call needs a null check; // 2) StoreArgs stub needs the target function pointer address and if the call is virtual // the stub also needs "this" in order to evaluate the target. const bool callNeedsNullCheck = call->NeedsNullCheck(); const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual(); // TODO-Review: The following transformation is implemented under the assumption that // both conditions can be true. However, I could not construct such an example // where a virtual tail call would require a null check. If the conditions // are mutually exclusive the following could be simplified. if (callNeedsNullCheck || stubNeedsThisPtr) { // Clone "this" if "this" has no side effects. if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0) { thisPtr = gtClone(objp, true); } // Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
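// Illustrative shape of what the code below builds when "this" must be
// spilled and null checked (V07 is a hypothetical temp number):
//   doBeforeStoreArgsStub: COMMA(ASG(LCL_VAR V07, objp), NULLCHECK(LCL_VAR V07))
//   thisPtr:               LCL_VAR V07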
if (thisPtr == nullptr) { const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); if (callNeedsNullCheck) { // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet()); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck); } thisPtr = gtNewLclvNode(lclNum, objp->TypeGet()); if (stubNeedsThisPtr) { thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet()); } } else { if (callNeedsNullCheck) { // deref("this") doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB); if (stubNeedsThisPtr) { thisPtrStubArg = gtClone(objp, true); } } else { assert(stubNeedsThisPtr); thisPtrStubArg = objp; } } call->gtFlags &= ~GTF_CALL_NULLCHECK; assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr); } else { thisPtr = objp; } // During rationalization tmp="this" and null check will be materialized // in the right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // We may need to pass the target, for instance for calli or generic methods // where we pass instantiating stub. if (stubNeedsTargetFnPtr) { JITDUMP("Adding target since VM requested it\n"); GenTree* target; if (!call->IsVirtual()) { if (call->gtCallType == CT_INDIRECT) { noway_assert(call->gtCallAddr != nullptr); target = call->gtCallAddr; } else { CORINFO_CONST_LOOKUP addrInfo; info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo); CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE); if (addrInfo.accessType == IAT_VALUE) { handle = addrInfo.handle; } else if (addrInfo.accessType == IAT_PVALUE) { pIndirection = addrInfo.addr; } target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd); } } else { assert(!call->tailCallInfo->GetSig()->hasTypeArg()); CORINFO_CALL_INFO callInfo; unsigned flags = CORINFO_CALLINFO_LDFTN; if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_CALLINFO_CALLVIRT; } eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo); target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo); } // Insert target as last arg GenTreeCall::Use** newArgSlot = &call->gtCallArgs; while (*newArgSlot != nullptr) { newArgSlot = &(*newArgSlot)->NextRef(); } *newArgSlot = gtNewCallArgs(target); } // This is now a direct call to the store args stub and not a tailcall. call->gtCallType = CT_USER_FUNC; call->gtCallMethHnd = help.hStoreArgs; call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV); // The store-args stub returns no value. 
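// Illustrative: a call "int F(a, b)" is retyped below into a call to the
// void-returning StoreArgs stub, "StoreArgsStub(a, b[, target])"; the integer
// result is produced instead by the dispatcher tree built earlier in
// fgCreateCallDispatcherAndGetResult, so the return type is dropped here.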
call->gtRetClsHnd = nullptr; call->gtType = TYP_VOID; call->gtReturnType = TYP_VOID; GenTree* callStoreArgsStub = call; if (doBeforeStoreArgsStub != nullptr) { callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub); } GenTree* finalTree = gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult); finalTree = fgMorphTree(finalTree); JITDUMP("fgMorphTailCallViaHelpers (after):\n"); DISPTREE(finalTree); return finalTree; } //------------------------------------------------------------------------ // fgCreateCallDispatcherAndGetResult: Given a call // CALL // {callTarget} // {retbuf} // {this} // {args} // create a similarly typed node that calls the tailcall dispatcher and returns // the result, as in the following: // COMMA // CALL TailCallDispatcher // ADDR ReturnAddress // &CallTargetFunc // ADDR RetValue // RetValue // If the call has type TYP_VOID, only create the CALL node. // // Arguments: // origCall - the call // callTargetStubHnd - the handle of the CallTarget function (this is a special // IL stub created by the runtime) // dispatcherHnd - the handle of the tailcall dispatcher function // // Return Value: // A node that can be used in place of the original call. // GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd) { GenTreeCall* callDispatcherNode = gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo()); // The dispatcher has signature // void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue) // Add return value arg. GenTree* retValArg; GenTree* retVal = nullptr; unsigned int newRetLcl = BAD_VAR_NUM; GenTree* copyToRetBufNode = nullptr; if (origCall->HasRetBufArg()) { JITDUMP("Transferring retbuf\n"); GenTree* retBufArg = origCall->gtCallArgs->GetNode(); assert(info.compRetBuffArg != BAD_VAR_NUM); assert(retBufArg->OperIsLocal()); assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg); // Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects // the return value argument retValArg to point to the stack. // We use a temporary stack allocated return buffer to hold the value during the dispatcher call // and copy the value back to the caller return buffer after that. 
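// Sketch of the retbuf handling built below (names are the locals declared
// just after this comment):
//   tmpRetBufNum     - fresh address-exposed stack temp of the return type
//   retValArg        - ADDR(LCL_VAR tmpRetBuf), handed to the dispatcher
//   copyToRetBufNode - copy block from the temp into the caller's retbuf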
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer")); constexpr bool unsafeValueClsCheck = false; lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck); lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet(); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType)); var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet(); GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType); GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr); GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType); constexpr bool isVolatile = false; constexpr bool isCopyBlock = true; copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock); if (origCall->gtType != TYP_VOID) { retVal = gtClone(retBufArg); } } else if (origCall->gtType != TYP_VOID) { JITDUMP("Creating a new temp for the return value\n"); newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher")); if (varTypeIsStruct(origCall->gtType)) { lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false); } else { // Since we pass a reference to the return value to the dispatcher // we need to use the real return type so we can normalize it on // load when we return it. lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType; } lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType))); retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)); if (varTypeIsStruct(origCall->gtType)) { retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv()); } } else { JITDUMP("No return value so using null pointer as arg\n"); retValArg = gtNewZeroConNode(TYP_I_IMPL); } callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs); // Add callTarget callDispatcherNode->gtCallArgs = gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd), callDispatcherNode->gtCallArgs); // Add the caller's return address slot. if (lvaRetAddrVar == BAD_VAR_NUM) { lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address")); lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL; lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); } GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL)); callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs); GenTree* finalTree = callDispatcherNode; if (copyToRetBufNode != nullptr) { finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode); } if (origCall->gtType == TYP_VOID) { return finalTree; } assert(retVal != nullptr); finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal); // The JIT seems to want to CSE this comma and messes up multi-reg ret // values in the process. Just avoid CSE'ing this tree entirely in that // case. 
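// Illustrative final shape for a value-returning call, matching this
// function's header comment:
//   COMMA(COMMA(CALL Dispatcher, copyToRetBuf?), retVal)
// where the inner copyToRetBuf comma is only present for retbuf returns.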
if (origCall->HasMultiRegRetVal()) { finalTree->gtFlags |= GTF_DONT_CSE; } return finalTree; } //------------------------------------------------------------------------ // getLookupTree: get a lookup tree // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // handleFlags - flags to set on the result node // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the lookup tree // GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); } return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle); } //------------------------------------------------------------------------ // getRuntimeLookupTree: get a tree for a runtime lookup // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the runtime lookup tree // GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { assert(!compIsForInlining()); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be // used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array. if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull || pRuntimeLookup->testForFixup) { // If the first condition is true, runtime lookup tree is available only via the run-time helper function. // TODO-CQ If the second or third condition is true, we are always using the slow path since we can't // introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper. // The long-term solution is to introduce a new node representing a runtime lookup, create instances // of that node both in the importer and here, and expand the node in lower (introducing control flow if // necessary). 
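// Illustrative: the helper path below collapses the entire lookup into a
// single runtime call, roughly CALL(lookup helper, runtimeContext, signature),
// rather than expanding the indirection chain inline as the code further
// down does.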
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind), compileTimeHandle); } GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack)); auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* { if (!((*tree)->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(*tree, true); if (clone) { return clone; } } unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); stmts.Push(gtNewTempAssign(temp, *tree)); *tree = gtNewLclvNode(temp, lvaGetActualType(temp)); return gtNewLclvNode(temp, lvaGetActualType(temp)); }; // Apply repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { GenTree* preInd = nullptr; if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset")); } if (i != 0) { result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result); } if (pRuntimeLookup->offsets[i] != 0) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } assert(!pRuntimeLookup->testForNull); if (pRuntimeLookup->indirections > 0) { assert(!pRuntimeLookup->testForFixup); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; } // Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result))) while (!stmts.Empty()) { result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result); } DISPTREE(result); return result; } //------------------------------------------------------------------------ // getVirtMethodPointerTree: get a tree for a virtual method pointer // // Arguments: // thisPtr - tree representing `this` pointer // pResolvedToken - pointer to the resolved token of the method // pCallInfo - pointer to call info // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true); GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false); GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc); return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // getTokenHandleTree: get a handle tree for a token // // Arguments: // pResolvedToken - token to get a handle for // parent - whether parent should be imported // // Return Value: // A node representing the handle for the token GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent) { CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo); GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } /***************************************************************************** * * Transform the given GT_CALL tree for tail call via JIT helper. */ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { JITDUMP("fgMorphTailCallViaJitHelper (before):\n"); DISPTREE(call); // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. // // For x86, the tailcall helper is defined as: // // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // // Note that the special arguments are on the stack, whereas the function arguments follow // the normal convention: there might be register arguments in ECX and EDX. The stack will // look like (highest address at the top): // first normal stack argument // ... // last normal stack argument // numberOfOldStackArgs // numberOfNewStackArgs // flags // callTarget // // Each special arg is 4 bytes. // // 'flags' is a bitmask where: // 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all // callee-saved registers for tailcall functions. Note that the helper assumes // that the callee-saved registers live immediately below EBP, and must have been // pushed in this order: EDI, ESI, EBX. // 2 == call target is a virtual stub dispatch. // // The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details // on the custom calling convention. // Check for PInvoke call types that we don't handle in codegen yet. assert(!call->IsUnmanaged()); assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr)); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // First move the 'this' pointer (if any) onto the regular arg list. We do this because // we are going to prepend special arguments onto the argument list (for non-x86 platforms), // and thus shift where the 'this' pointer will be passed to a later argument slot. In // addition, for all platforms, we are going to change the call into a helper call. Our code // generation code for handling calls to helpers does not handle 'this' pointers. So, when we // do this transformation, we must explicitly create a null 'this' pointer check, if required, // since special 'this' pointer handling will no longer kick in. // // Some call types, such as virtual vtable calls, require creating a call address expression // that involves the "this" pointer. Lowering will sometimes create an embedded statement // to create a temporary that is assigned to the "this" pointer expression, and then use // that temp to create the call address expression. This temp creation embedded statement // will occur immediately before the "this" pointer argument, and then will be used for both // the "this" pointer argument as well as the call address expression. In the normal ordering, // the embedded statement establishing the "this" pointer temp will execute before both uses // of the temp. 
However, for tail calls via a helper, we move the "this" pointer onto the // normal call argument list, and insert a placeholder which will hold the call address // expression. For non-x86, things are ok, because the order of execution of these is not // altered. However, for x86, the call address expression is inserted as the *last* argument // in the argument list, *after* the "this" pointer. It will be put on the stack, and be // evaluated first. To ensure we don't end up with out-of-order temp definition and use, // for those cases where call lowering creates an embedded form temp of "this", we will // create a temp here, early, that will later get morphed correctly. if (call->gtCallThisArg != nullptr) { GenTree* thisPtr = nullptr; GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR)) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); objp = thisPtr; } if (call->NeedsNullCheck()) { // clone "this" if "this" has no side effects. if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT)) { thisPtr = gtClone(objp, true); } var_types vt = objp->TypeGet(); if (thisPtr == nullptr) { // create a temp if either "this" has side effects or "this" is too complex to clone. // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); } else { // thisPtr = COMMA(deref("this"), "this") GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB); thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true)); } call->gtFlags &= ~GTF_CALL_NULLCHECK; } else { thisPtr = objp; } // TODO-Cleanup: we leave it as a virtual stub call to // use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here // and change `LowerCall` to recognize it as a direct call. // During rationalization tmp="this" and null check will // materialize as embedded stmts in right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. GenTreeCall::Use** ppArg = &call->gtCallArgs; for (GenTreeCall::Use& use : call->Args()) { ppArg = &use.NextRef(); } assert(ppArg != nullptr); assert(*ppArg == nullptr); unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES; GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate. // The constant will be replaced. GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the flags. // The constant will be replaced. 
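// Illustrative final x86 argument layout once all four special args are
// injected (see the JIT_TailCall description above; the placeholder constants
// 9, 8 and 7 are rewritten by Lowering):
//   <normal args>, numberOfOldStackArgs, numberOfNewStackArgs, flags, callTarget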
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg1); ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the real call target that the Lowering phase will generate. // The constant will be replaced. GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg0); // It is now a varargs tail call. call->gtCallMoreFlags |= GTF_CALL_M_VARARGS; call->gtFlags &= ~GTF_CALL_POP_ARGS; // The function is responsible for doing an explicit null check when it is necessary. assert(!call->NeedsNullCheck()); JITDUMP("fgMorphTailCallViaJitHelper (after):\n"); DISPTREE(call); } //------------------------------------------------------------------------ // fgGetStubAddrArg: Return the virtual stub address for the given call. // // Notes: // the JIT must place the address of the stub used to load the call target, // the "stub indirection cell", in a special call argument with a special register. // // Arguments: // call - a call that needs virtual stub dispatching. // // Return Value: // Addr tree with register requirements set. // GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call) { assert(call->IsVirtualStub()); GenTree* stubAddrArg; if (call->gtCallType == CT_INDIRECT) { stubAddrArg = gtClone(call->gtCallAddr, true); } else { assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT); ssize_t addr = ssize_t(call->gtStubCallStubAddr); stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR); #ifdef DEBUG stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif } assert(stubAddrArg != nullptr); stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg()); return stubAddrArg; } //------------------------------------------------------------------------------ // fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that // corresponds to the argument to a recursive call. // // Notes: // Due to non-standard args this is not just fgArgTabEntry::argNum. // For example, in R2R compilations we will have added a non-standard // arg for the R2R indirection cell. // // Arguments: // argTabEntry - the arg // unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry) { fgArgInfo* argInfo = call->fgArgInfo; unsigned argCount = argInfo->ArgCount(); fgArgTabEntry** argTable = argInfo->ArgTable(); unsigned numToRemove = 0; for (unsigned i = 0; i < argCount; i++) { fgArgTabEntry* arg = argTable[i]; // Late added args add extra args that do not map to IL parameters and that we should not reassign. if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate()) continue; if (arg->argNum < argTabEntry->argNum) numToRemove++; } return argTabEntry->argNum - numToRemove; } //------------------------------------------------------------------------------ // fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop. // // // Arguments: // block - basic block ending with a recursive fast tail call // recursiveTailCall - recursive tail call to transform // // Notes: // The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop. void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall) { assert(recursiveTailCall->IsTailCallConvertibleToLoop()); Statement* lastStmt = block->lastStmt(); assert(recursiveTailCall == lastStmt->GetRootNode()); // Transform recursive tail call into a loop.
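// Illustrative source-level view (hypothetical C# method): a body ending in
// "return Self(x - 1, 1);" becomes "<reassign parameters>; goto start;", with
// argument values spilled to temps first so that reassigning a parameter
// cannot clobber an argument that still needs to be evaluated. The block
// comments below show the same transformation at the IR level.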
Statement* earlyArgInsertionPoint = lastStmt; const DebugInfo& callDI = lastStmt->GetDebugInfo(); // Hoist arg setup statement for the 'this' argument. GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg; if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode()) { Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt); } // All arguments whose trees may involve caller parameter local variables need to be assigned to temps first; // then the temps need to be assigned to the method parameters. This is done so that the caller // parameters are not re-assigned before call arguments depending on them are evaluated. // tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of // where the next temp or parameter assignment should be inserted. // In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first // while the second call argument (const 1) doesn't. // Basic block before tail recursion elimination: // ***** BB04, stmt 1 (top level) // [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013) // [000033] --C - G------ - \--* call void RecursiveMethod // [000030] ------------ | / --* const int - 1 // [000031] ------------arg0 in rcx + --* +int // [000029] ------------ | \--* lclVar int V00 arg1 // [000032] ------------arg1 in rdx \--* const int 1 // // // Basic block after tail recursion elimination : // ***** BB04, stmt 1 (top level) // [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000030] ------------ | / --* const int - 1 // [000031] ------------ | / --* +int // [000029] ------------ | | \--* lclVar int V00 arg1 // [000050] - A---------- \--* = int // [000049] D------N---- \--* lclVar int V02 tmp0 // // ***** BB04, stmt 2 (top level) // [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000052] ------------ | / --* lclVar int V02 tmp0 // [000054] - A---------- \--* = int // [000053] D------N---- \--* lclVar int V00 arg0 // ***** BB04, stmt 3 (top level) // [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000032] ------------ | / --* const int 1 // [000057] - A---------- \--* = int // [000056] D------N---- \--* lclVar int V01 arg1 Statement* tmpAssignmentInsertionPoint = lastStmt; Statement* paramAssignmentInsertionPoint = lastStmt; // Process early args. They may contain both setup statements for late args and actual args. // Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum // below has the correct second argument. int earlyArgIndex = (thisArg == nullptr) ? 0 : 1; for (GenTreeCall::Use& use : recursiveTailCall->Args()) { GenTree* earlyArg = use.GetNode(); if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode()) { if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0) { // This is a setup node so we need to hoist it. Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt); } else { // This is an actual argument that needs to be assigned to the corresponding caller parameter. 
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } } } earlyArgIndex++; } // Process late args. int lateArgIndex = 0; for (GenTreeCall::Use& use : recursiveTailCall->LateArgs()) { // A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter. GenTree* lateArg = use.GetNode(); fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } lateArgIndex++; } // If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that // compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { var_types thisType = lvaTable[info.compThisArg].TypeGet(); GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType); GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType)); Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog // but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization // for all non-parameter IL locals as well as temp structs with GC fields. // Liveness phase will remove unnecessary initializations. 
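// Illustrative (local numbers hypothetical): under compInitMem an int user
// local V03 gets a "V03 = 0" assignment and a struct local with GC fields
// gets an init block, both inserted just before the loop-back jump.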
if (info.compInitMem || compSuppressedZeroInit)
{
    unsigned   varNum;
    LclVarDsc* varDsc;
    for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
    {
#if FEATURE_FIXED_OUT_ARGS
        if (varNum == lvaOutgoingArgSpaceVar)
        {
            continue;
        }
#endif // FEATURE_FIXED_OUT_ARGS
        if (!varDsc->lvIsParam)
        {
            var_types lclType            = varDsc->TypeGet();
            bool      isUserLocal        = (varNum < info.compLocalsCount);
            bool      structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr());
            bool      hadSuppressedInit  = varDsc->lvSuppressedZeroInit;
            if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit)
            {
                GenTree* lcl  = gtNewLclvNode(varNum, lclType);
                GenTree* init = nullptr;
                if (varTypeIsStruct(lclType))
                {
                    const bool isVolatile  = false;
                    const bool isCopyBlock = false;
                    init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock);
                    init = fgMorphInitBlock(init);
                }
                else
                {
                    GenTree* zero = gtNewZeroConNode(genActualType(lclType));
                    init          = gtNewAssignNode(lcl, zero);
                }
                Statement* initStmt = gtNewStmt(init, callDI);
                fgInsertStmtBefore(block, lastStmt, initStmt);
            }
        }
    }
}

// Remove the call
fgRemoveStmt(block, lastStmt);

// Set the loop edge.
if (opts.IsOSR())
{
    // Todo: this may not look like a viable loop header.
    // Might need the moral equivalent of a scratch BB.
    block->bbJumpDest = fgEntryBB;
}
else
{
    // Ensure we have a scratch block and then target the next
    // block. Loop detection needs to see a pred out of the loop,
    // so mark the scratch block BBF_DONT_REMOVE to prevent empty
    // block removal on it.
    fgEnsureFirstBBisScratch();
    fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
    block->bbJumpDest = fgFirstBB->bbNext;
}

// Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
}

//------------------------------------------------------------------------------
// fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter.
//
// Arguments:
//    arg - argument to assign
//    argTabEntry - argument table entry corresponding to arg
//    lclParamNum - the lcl num of the parameter
//    block - basic block the call is in
//    callDI - debug info for the call
//    tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary)
//    paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted
//
// Return Value:
//    parameter assignment statement if one was inserted; nullptr otherwise.

Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree*         arg,
                                                           fgArgTabEntry*   argTabEntry,
                                                           unsigned         lclParamNum,
                                                           BasicBlock*      block,
                                                           const DebugInfo& callDI,
                                                           Statement*       tmpAssignmentInsertionPoint,
                                                           Statement*       paramAssignmentInsertionPoint)
{
    // Call arguments should be assigned to temps first and then the temps should be assigned to parameters because
    // some argument trees may reference parameters directly.

    GenTree* argInTemp             = nullptr;
    bool     needToAssignParameter = true;

    // TODO-CQ: enable calls with struct arguments passed in registers.
    noway_assert(!varTypeIsStruct(arg->TypeGet()));

    if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl())
    {
        // The argument is already assigned to a temp or is a const.
        argInTemp = arg;
    }
    else if (arg->OperGet() == GT_LCL_VAR)
    {
        unsigned   lclNum = arg->AsLclVar()->GetLclNum();
        LclVarDsc* varDsc = lvaGetDesc(lclNum);
        if (!varDsc->lvIsParam)
        {
            // The argument is a non-parameter local so it doesn't need to be assigned to a temp.
argInTemp = arg; } else if (lclNum == lclParamNum) { // The argument is the same parameter local that we were about to assign so // we can skip the assignment. needToAssignParameter = false; } } // TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve // any caller parameters. Some common cases are handled above but we may be able to eliminate // more temp assignments. Statement* paramAssignStmt = nullptr; if (needToAssignParameter) { if (argInTemp == nullptr) { // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. // TODO: we can avoid a temp assignment if we can prove that the argument tree // doesn't involve any caller parameters. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType); GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc); Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI); fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt); argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType); } // Now assign the temp to the parameter. const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum); assert(paramDsc->lvIsParam); GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType); GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp); paramAssignStmt = gtNewStmt(paramAssignNode, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt); } return paramAssignStmt; } /***************************************************************************** * * Transform the given GT_CALL tree for code generation. */ GenTree* Compiler::fgMorphCall(GenTreeCall* call) { if (call->CanTailCall()) { GenTree* newNode = fgMorphPotentialTailCall(call); if (newNode != nullptr) { return newNode; } assert(!call->CanTailCall()); #if FEATURE_MULTIREG_RET if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet())) { // The tail call has been rejected so we must finish the work deferred // by impFixupCallStructReturn for multi-reg-returning calls and transform // ret call // into // temp = call // ret temp // Force re-evaluating the argInfo as the return argument has changed. call->ResetArgInfo(); // Create a new temp. unsigned tmpNum = lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call).")); lvaTable[tmpNum].lvIsMultiRegRet = true; CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd; assert(structHandle != NO_CLASS_HANDLE); const bool unsafeValueClsCheck = false; lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); GenTree* assg = gtNewAssignNode(dst, call); assg = fgMorphTree(assg); // Create the assignment statement and insert it before the current statement. Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo()); fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt); // Return the temp. 
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
            result->gtFlags |= GTF_DONT_CSE;

            compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call

#ifdef DEBUG
            if (verbose)
            {
                printf("\nInserting assignment of a multi-reg call result to a temp:\n");
                gtDispStmt(assgStmt);
            }
            result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
            return result;
        }
#endif
    }

    if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
        (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)
#ifdef FEATURE_READYTORUN
         || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
#endif
         ) &&
        (call == fgMorphStmt->GetRootNode()))
    {
        // This is a call to CORINFO_HELP_VIRTUAL_FUNC_PTR with an ignored result.
        // Transform it into a null check.

        GenTree* thisPtr   = call->gtCallArgs->GetNode();
        GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB);

        return fgMorphTree(nullCheck);
    }

    noway_assert(call->gtOper == GT_CALL);

    //
    // Only count calls once (only in the global morph phase)
    //
    if (fgGlobalMorph)
    {
        if (call->gtCallType == CT_INDIRECT)
        {
            optCallCount++;
            optIndirectCallCount++;
        }
        else if (call->gtCallType == CT_USER_FUNC)
        {
            optCallCount++;
            if (call->IsVirtual())
            {
                optIndirectCallCount++;
            }
        }
    }

    // Couldn't inline - remember that this BB contains method calls

    // Mark the block as a GC safe point for the call if possible.
    // In the event the call indicates the block isn't a GC safe point
    // and the call is unmanaged with a GC transition suppression request
    // then insert a GC poll.
    CLANG_FORMAT_COMMENT_ANCHOR;

    if (IsGcSafePoint(call))
    {
        compCurBB->bbFlags |= BBF_GC_SAFE_POINT;
    }

    // Regardless of the state of the basic block with respect to GC safe point,
    // we will always insert a GC Poll for scenarios involving a suppressed GC
    // transition. Only mark the block for GC Poll insertion on the first morph.
    if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition())
    {
        compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT);
        optMethodFlags |= OMF_NEEDS_GCPOLLS;
    }

    // Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag
    //
    // We need to do these before the arguments are morphed
    if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC))
    {
        // See if this is foldable
        GenTree* optTree = gtFoldExprCall(call);

        // If we optimized, morph the result
        if (optTree != call)
        {
            return fgMorphTree(optTree);
        }
    }

    compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call

    /* Process the "normal" argument list */
    call = fgMorphArgs(call);
    noway_assert(call->gtOper == GT_CALL);

    // Should we expand this virtual method call target early here?
    //
    if (call->IsExpandedEarly() && call->IsVirtualVtable())
    {
        // We only expand the Vtable Call target once in the global morph phase
        if (fgGlobalMorph)
        {
            assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once
            call->gtControlExpr = fgExpandVirtualVtableCallTarget(call);
        }
        // We always have to morph or re-morph the control expr
        //
        call->gtControlExpr = fgMorphTree(call->gtControlExpr);

        // Propagate any gtFlags into the call
        call->gtFlags |= call->gtControlExpr->gtFlags;
    }

    // Morph stelem.ref helper call to store a null value, into a store into an array without the helper.
    // This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
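    // For example, "array[index] = null" is imported as a CORINFO_HELP_ARRADDR_ST call; storing null
    // can never fail the covariance check, so it can be rewritten as a plain array store below.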
    if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) &&
        (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
    {
        GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode();
        if (value->IsIntegralConst(0))
        {
            assert(value->OperGet() == GT_CNS_INT);

            GenTree* arr   = gtArgEntryByArgNum(call, 0)->GetNode();
            GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode();

            // Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy
            // the spill trees as well if necessary.
            GenTreeOp* argSetup = nullptr;
            for (GenTreeCall::Use& use : call->Args())
            {
                GenTree* const arg = use.GetNode();
                if (arg->OperGet() != GT_ASG)
                {
                    continue;
                }

                assert(arg != arr);
                assert(arg != index);

                arg->gtFlags &= ~GTF_LATE_ARG;

                GenTree* op1 = argSetup;
                if (op1 == nullptr)
                {
                    op1 = gtNewNothingNode();
#if DEBUG
                    op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
                }

                argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg);

#if DEBUG
                argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
            }

#ifdef DEBUG
            auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult {
                (*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
                return WALK_CONTINUE;
            };

            fgWalkTreePost(&arr, resetMorphedFlag);
            fgWalkTreePost(&index, resetMorphedFlag);
            fgWalkTreePost(&value, resetMorphedFlag);
#endif // DEBUG

            GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index);
            GenTree* const arrStore     = gtNewAssignNode(arrIndexNode, value);

            GenTree* result = fgMorphTree(arrStore);
            if (argSetup != nullptr)
            {
                result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result);
#if DEBUG
                result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
            }

            return result;
        }
    }

    if (call->IsNoReturn())
    {
        //
        // If we know that the call does not return then we can set fgRemoveRestOfBlock
        // to remove all subsequent statements and change the call's basic block to BBJ_THROW.
        // As a result the compiler won't need to preserve live registers across the call.
        //
        // This isn't needed for tail calls as there shouldn't be any code after the call anyway.
        // Besides, the tail call code is part of the epilog and converting the block to
        // BBJ_THROW would result in the tail call being dropped as the epilog is generated
        // only for BBJ_RETURN blocks.
        //
        if (!call->IsTailCall())
        {
            fgRemoveRestOfBlock = true;
        }
    }

    return call;
}

/*****************************************************************************
 *
 *  Expand and return the call target address for a VirtualCall
 *  The code here should match that generated by LowerVirtualVtableCall
 */

GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call)
{
    GenTree* result;

    JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));

    noway_assert(call->gtCallType == CT_USER_FUNC);

    // get a reference to the thisPtr being passed
    fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0);
    GenTree*       thisPtr         = thisArgTabEntry->GetNode();

    // fgMorphArgs must enforce this invariant by creating a temp
    //
    assert(thisPtr->OperIsLocal());

    // Make a copy of the thisPtr by cloning
    //
    thisPtr = gtClone(thisPtr, true);

    noway_assert(thisPtr != nullptr);

    // Get hold of the vtable offset
    unsigned vtabOffsOfIndirection;
    unsigned vtabOffsAfterIndirection;
    bool     isRelative;
    info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
                                            &isRelative);

    // Dereference the this pointer to obtain the method table; it is called vtab below
    GenTree* vtab;
    assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable
    vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr);
    vtab->gtFlags |= GTF_IND_INVARIANT;

    // Get the appropriate vtable chunk
    if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
    {
        // Note this isRelative code path is currently never executed
        // as the VM doesn't ever return: isRelative == true
        //
        if (isRelative)
        {
            // MethodTable offset is a relative pointer.
            //
            // An additional temporary variable is used to store the virtual table pointer.
            // The address of the method is obtained by the following computations:
            //
            // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
            // vtable-1st-level-indirection):
            // tmp = vtab
            //
            // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
            // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
            //
            //
            // When isRelative is true we need to set up two temporary variables
            // var1 = vtab
            // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
            // result = [var2] + var2
            //
            unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab"));
            unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative"));
            GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab

            // [tmp + vtabOffsOfIndirection]
            GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
                                              gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
            tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false);
            tmpTree1->gtFlags |= GTF_IND_NONFAULTING;
            tmpTree1->gtFlags |= GTF_IND_INVARIANT;

            // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection
            GenTree* tmpTree2 =
                gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
                              gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL));

            // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
            tmpTree2         = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1);
            GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression>

            // This last indirection is not invariant, but is non-faulting
            result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2]
            result->gtFlags |= GTF_IND_NONFAULTING;

            result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2

            // Now stitch together the two assignments and the calculation of result into a single tree
            GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result);
            result             = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree);
        }
        else
        {
            // result = [vtab + vtabOffsOfIndirection]
            result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
            result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
            result->gtFlags |= GTF_IND_NONFAULTING;
            result->gtFlags |= GTF_IND_INVARIANT;
        }
    }
    else
    {
        result = vtab;
        assert(!isRelative);
    }

    if (!isRelative)
    {
        // Load the function address
        // result = [result + vtabOffsAfterIndirection]
        result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL));
        // This last indirection is not invariant, but is non-faulting
        result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
        result->gtFlags |= GTF_IND_NONFAULTING;
    }

    return result;
}

/*****************************************************************************
 *
 *  Transform the given constant tree for code generation.
 */

GenTree* Compiler::fgMorphConst(GenTree* tree)
{
    assert(tree->OperIsConst());

    /* Clear any exception flags or other unnecessary flags
     * that may have been set before folding this node to a constant */

    tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);

    if (!tree->OperIs(GT_CNS_STR))
    {
        return tree;
    }

    if (tree->AsStrCon()->IsStringEmptyField())
    {
        LPVOID         pValue;
        InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
        return fgMorphTree(gtNewStringLiteralNode(iat, pValue));
    }

    // TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will
    // guarantee slow performance for that block. Instead cache the return value
    // of CORINFO_HELP_STRCNS and go to the cache first, giving reasonable perf.

    bool useLazyStrCns = false;
    if (compCurBB->bbJumpKind == BBJ_THROW)
    {
        useLazyStrCns = true;
    }
    else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall())
    {
        // Quick check: if the root node of the current statement happens to be a noreturn call.
        GenTreeCall* call = compCurStmt->GetRootNode()->AsCall();
        useLazyStrCns     = call->IsNoReturn() || fgIsThrow(call);
    }

    if (useLazyStrCns)
    {
        CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd);
        if (helper != CORINFO_HELP_UNDEF)
        {
            // For unimportant blocks, we want to construct the string lazily

            GenTreeCall::Use* args;
            if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE)
            {
                args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT));
            }
            else
            {
                args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT),
                                     gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd));
            }

            tree = gtNewHelperCallNode(helper, TYP_REF, args);
            return fgMorphTree(tree);
        }
    }

    assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd));

    LPVOID         pValue;
    InfoAccessType iat =
        info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue);

    tree = gtNewStringLiteralNode(iat, pValue);

    return fgMorphTree(tree);
}

//------------------------------------------------------------------------
// fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar.
//
// Arguments:
//    obj - the obj node.
//    destroyNodes -- destroy nodes that are optimized away
//
// Return value:
//    GenTreeLclVar if the obj can be replaced by it, null otherwise.
//
// Notes:
//    TODO-CQ: currently this transformation is done only under copy block,
//    but it would be beneficial to do it for each OBJ node. However, `PUT_ARG_STACK`
//    for some platforms does not expect struct `LCL_VAR` as a source, so
//    it needs more work.
//
GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes)
{
    if (opts.OptimizationEnabled())
    {
        GenTree* op1 = obj->Addr();
        assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity");
        if (op1->OperIs(GT_ADDR))
        {
            GenTreeUnOp* addr   = op1->AsUnOp();
            GenTree*     addrOp = addr->gtGetOp1();
            if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR))
            {
                GenTreeLclVar* lclVar = addrOp->AsLclVar();

                ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout();
                ClassLayout* objLayout    = obj->GetLayout();
                if (ClassLayout::AreCompatible(lclVarLayout, objLayout))
                {
#ifdef DEBUG
                    CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle();
                    assert(objClsHandle != NO_CLASS_HANDLE);
                    if (verbose)
                    {
                        CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar);
                        printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar));
                        printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different"));
                    }
#endif
                    // Keep the DONT_CSE flag in sync
                    // (as the addr always marks it for its op1)
                    lclVar->gtFlags &= ~GTF_DONT_CSE;
                    lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE);

                    if (destroyNodes)
                    {
                        DEBUG_DESTROY_NODE(obj);
                        DEBUG_DESTROY_NODE(addr);
                    }

                    return lclVar;
                }
            }
        }
    }

    return nullptr;
}

/*****************************************************************************
 *
 *  Transform the given GTK_LEAF tree for code generation.
 */

GenTree* Compiler::fgMorphLeaf(GenTree* tree)
{
    assert(tree->OperKind() & GTK_LEAF);

    if (tree->gtOper == GT_LCL_VAR)
    {
        const bool forceRemorph = false;
        return fgMorphLocalVar(tree, forceRemorph);
    }
    else if (tree->gtOper == GT_LCL_FLD)
    {
        if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed())
        {
            tree->gtFlags |= GTF_GLOB_REF;
        }

#ifdef TARGET_X86
        if (info.compIsVarArgs)
        {
            GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(),
                                                         tree->AsLclFld()->GetLclOffs());
            if (newTree != nullptr)
            {
                if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
                {
                    newTree->SetOper(GT_IND);
                }
                return newTree;
            }
        }
#endif // TARGET_X86
    }
    else if (tree->gtOper == GT_FTN_ADDR)
    {
        GenTreeFptrVal* fptrValTree = tree->AsFptrVal();

        // A function pointer address is being used. Let the VM know if this is the
        // target of a Delegate or a raw function pointer.
        bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget;

        CORINFO_CONST_LOOKUP addrInfo;

#ifdef FEATURE_READYTORUN
        if (fptrValTree->gtEntryPoint.addr != nullptr)
        {
            addrInfo = fptrValTree->gtEntryPoint;
        }
        else
#endif
        {
            info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer,
                                                         &addrInfo);
        }

        GenTree* indNode = nullptr;
        switch (addrInfo.accessType)
        {
            case IAT_PPVALUE:
                indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true);

                // Add the second indirection
                indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode);
                // This indirection won't cause an exception.
                indNode->gtFlags |= GTF_IND_NONFAULTING;
                // This indirection is also invariant.
                indNode->gtFlags |= GTF_IND_INVARIANT;
                break;

            case IAT_PVALUE:
                indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true);
                break;

            case IAT_VALUE:
                // Refer to gtNewIconHandleNode() as the template for constructing a constant handle
                //
                tree->SetOper(GT_CNS_INT);
                tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
                tree->gtFlags |= GTF_ICON_FTN_ADDR;
                break;

            default:
                noway_assert(!"Unknown addrInfo.accessType");
        }

        if (indNode != nullptr)
        {
            DEBUG_DESTROY_NODE(tree);
            tree = fgMorphTree(indNode);
        }
    }

    return tree;
}

void Compiler::fgAssignSetVarDef(GenTree* tree)
{
    GenTreeLclVarCommon* lclVarCmnTree;
    bool                 isEntire = false;
    if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
    {
        if (isEntire)
        {
            lclVarCmnTree->gtFlags |= GTF_VAR_DEF;
        }
        else
        {
            // We consider partial definitions to be modeled as uses followed by definitions.
            // This captures the idea that preceding defs are not necessarily made redundant
            // by this definition.
            lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG);
        }
    }
}

//------------------------------------------------------------------------
// fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment
//
// Arguments:
//    tree - The block assignment to be possibly morphed
//
// Return Value:
//    The modified tree if successful, nullptr otherwise.
//
// Assumptions:
//    'tree' must be a block assignment.
//
// Notes:
//    If successful, this method always returns the incoming tree, modifying only
//    its arguments.
//
GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
{
    // This must be a block assignment.
    noway_assert(tree->OperIsBlkOp());
    var_types asgType = tree->TypeGet();

    GenTree*   asg            = tree;
    GenTree*   dest           = asg->gtGetOp1();
    GenTree*   src            = asg->gtGetOp2();
    unsigned   destVarNum     = BAD_VAR_NUM;
    LclVarDsc* destVarDsc     = nullptr;
    GenTree*   destLclVarTree = nullptr;
    bool       isCopyBlock    = asg->OperIsCopyBlkOp();
    bool       isInitBlock    = !isCopyBlock;

    unsigned             size   = 0;
    CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;

    if (dest->gtEffectiveVal()->OperIsBlk())
    {
        GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk();
        size               = lhsBlk->Size();
        if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree))
        {
            destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
            destVarDsc = lvaGetDesc(destVarNum);
        }
        if (lhsBlk->OperGet() == GT_OBJ)
        {
            clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
        }
    }
    else
    {
        // Is this an enregisterable struct that is already a simple assignment?
        // This can happen if we are re-morphing.
        // Note that we won't do this straightaway if this is a SIMD type, since it
        // may be a promoted lclVar (sometimes we promote the individual float fields of
        // fixed-size SIMD).
        if (dest->OperGet() == GT_IND)
        {
            noway_assert(asgType != TYP_STRUCT);
            if (varTypeIsStruct(asgType))
            {
                destLclVarTree = fgIsIndirOfAddrOfLocal(dest);
            }
            if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR))
            {
                fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/);
                dest->gtFlags |= GTF_DONT_CSE;
                return tree;
            }
        }
        else
        {
            noway_assert(dest->OperIsLocal());
            destLclVarTree = dest;
        }
        if (destLclVarTree != nullptr)
        {
            destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
            destVarDsc = lvaGetDesc(destVarNum);
            if (asgType == TYP_STRUCT)
            {
                clsHnd = destVarDsc->GetStructHnd();
                size   = destVarDsc->lvExactSize;
            }
        }
        if (asgType != TYP_STRUCT)
        {
            size = genTypeSize(asgType);
        }
    }

    if (size == 0)
    {
        return nullptr;
    }

    if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
    {
        // Let fgMorphCopyBlock handle it.
        return nullptr;
    }

    if (src->IsCall() || src->OperIsSIMD())
    {
        // Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413.
        return nullptr;
    }

    if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
    {
        //
        //  See if we can do a simple transformation:
        //
        //          GT_ASG <TYP_size>
        //          /   \.
        //      GT_IND GT_IND or CNS_INT
        //         |      |
        //       [dest] [src]
        //

        if (asgType == TYP_STRUCT)
        {
            // It is possible to use `initobj` to init a primitive type on the stack,
            // like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`;
            // in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)`
            // and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real
            // struct assignment.

            if (size == REGSIZE_BYTES)
            {
                if (clsHnd == NO_CLASS_HANDLE)
                {
                    // A register-sized cpblk can be treated as an integer assignment.
                    asgType = TYP_I_IMPL;
                }
                else
                {
                    BYTE gcPtr;
                    info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
                    asgType = getJitGCType(gcPtr);
                }
            }
            else
            {
                switch (size)
                {
                    case 1:
                        asgType = TYP_BYTE;
                        break;
                    case 2:
                        asgType = TYP_SHORT;
                        break;
#ifdef TARGET_64BIT
                    case 4:
                        asgType = TYP_INT;
                        break;
#endif // TARGET_64BIT
                }
            }
        }
    }

    GenTree*   srcLclVarTree = nullptr;
    LclVarDsc* srcVarDsc     = nullptr;
    if (isCopyBlock)
    {
        if (src->OperGet() == GT_LCL_VAR)
        {
            srcLclVarTree = src;
            srcVarDsc     = lvaGetDesc(src->AsLclVarCommon());
        }
        else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree))
        {
            srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon());
        }
        if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted)
        {
            // Let fgMorphCopyBlock handle it.
            return nullptr;
        }
    }

    if (asgType != TYP_STRUCT)
    {
        noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType));

        // For initBlk, a non-constant source is not going to allow us to fiddle
        // with the bits to create a single assignment.
        // Nor do we (for now) support transforming an InitBlock of SIMD type, unless
        // it is a direct assignment to a lclVar and the value is zero.
        if (isInitBlock)
        {
            if (!src->IsConstInitVal())
            {
                return nullptr;
            }
            if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr)))
            {
                return nullptr;
            }
        }

        if (destVarDsc != nullptr)
        {
            // Kill everything about dest
            if (optLocalAssertionProp)
            {
                if (optAssertionCount > 0)
                {
                    fgKillDependentAssertions(destVarNum DEBUGARG(tree));
                }
            }

            // A previous incarnation of this code also required the local not to be
            // address-exposed(=taken). That seems orthogonal to the decision of whether
            // to do field-wise assignments: being address-exposed will cause it to be
            // "dependently" promoted, so it will be in the right memory location.
            // One possible further reason for avoiding field-wise stores is that the struct might have
            // alignment-induced holes, whose contents could be meaningful in unsafe code. If we decide that's
            // a valid concern, then we could compromise, and say that address-exposed + fields that do not
            // completely cover the memory of the struct prevent field-wise assignments. The same situation
            // exists for the "src" decision.
            if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
            {
                // Let fgMorphInitBlock handle it.  (Since we'll need to do field-var-wise assignments.)
                return nullptr;
            }
            else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc)))
            {
                // Use the dest local var directly, as well as its type.
                dest    = destLclVarTree;
                asgType = destVarDsc->lvType;

                // If the block operation had been a write to a local var of a small int type,
                // of the exact size of the small int type, and the var is NormalizeOnStore,
                // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
                // have done that normalization.  If we're now making it into an assignment,
                // the NormalizeOnStore will work, and it can be a full def.
                if (destVarDsc->lvNormalizeOnStore())
                {
                    dest->gtFlags &= (~GTF_VAR_USEASG);
                }
            }
            else
            {
                // Could be a non-promoted struct, or a floating point type local, or
                // an int subject to a partial write.  Don't enregister.
                lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));

                // Mark the local var tree as a definition point of the local.
                destLclVarTree->gtFlags |= GTF_VAR_DEF;
                if (size < destVarDsc->lvExactSize)
                {
                    // If it's not a full-width assignment....
                    destLclVarTree->gtFlags |= GTF_VAR_USEASG;
                }

                if (dest == destLclVarTree)
                {
                    GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
                    dest          = gtNewIndir(asgType, addr);
                }
            }
        }

        // Check to ensure we don't have a reducible *(& ... )
        if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR)
        {
            // If dest is an Indir or Block, and it has a child that is an Addr node
            //
            GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR

            // Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'?
            //
            GenTree*  destOp     = addrNode->gtGetOp1();
            var_types destOpType = destOp->TypeGet();

            // We can if we have a primitive integer type and the sizes are exactly the same.
            //
            if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType))))
            {
                dest    = destOp;
                asgType = destOpType;
            }
        }

        if (dest->gtEffectiveVal()->OperIsIndir())
        {
            // If we have no information about the destination, we have to assume it could
            // live anywhere (not just in the GC heap).
            // Mark the GT_IND node so that we use the correct write barrier helper in case
            // the field is a GC ref.

            if (!fgIsIndirOfAddrOfLocal(dest))
            {
                dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
                tree->gtFlags |= GTF_GLOB_REF;
            }

            dest->SetIndirExceptionFlags(this);
            tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT);
        }

        if (isCopyBlock)
        {
            if (srcVarDsc != nullptr)
            {
                // Handled above.
                assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted);
                if (!varTypeIsFloating(srcLclVarTree->TypeGet()) &&
                    size == genTypeSize(genActualType(srcLclVarTree->TypeGet())))
                {
                    // Use the src local var directly.
                    src = srcLclVarTree;
                }
                else
                {
                    // The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar))
                    // or indir(lclVarAddr) so it must be on the stack.
                    unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum();
                    lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
                    GenTree* srcAddr;
                    if (src == srcLclVarTree)
                    {
                        srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
                        src     = gtNewOperNode(GT_IND, asgType, srcAddr);
                    }
                    else
                    {
                        assert(src->OperIsIndir());
                    }
                }
            }

            if (src->OperIsIndir())
            {
                if (!fgIsIndirOfAddrOfLocal(src))
                {
                    // If we have no information about the src, we have to assume it could
                    // live anywhere (not just in the GC heap).
                    // Mark the GT_IND node so that we use the correct write barrier helper in case
                    // the field is a GC ref.
                    src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
                }

                src->SetIndirExceptionFlags(this);
            }
        }
        else // InitBlk
        {
#ifdef FEATURE_SIMD
            if (varTypeIsSIMD(asgType))
            {
                assert(!isCopyBlock); // Else we would have returned the tree above.
                noway_assert(src->IsIntegralConst(0));
                noway_assert(destVarDsc != nullptr);

                src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size);
            }
            else
#endif
            {
                if (src->OperIsInitVal())
                {
                    src = src->gtGetOp1();
                }
                assert(src->IsCnsIntOrI());
                // This will mutate the integer constant, in place, to be the correct
                // value for the type we are using in the assignment.
                src->AsIntCon()->FixupInitBlkValue(asgType);
            }
        }

        // Ensure that the dest is set up appropriately.
        if (dest->gtEffectiveVal()->OperIsIndir())
        {
            dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/);
        }

        // Ensure that the rhs is set up appropriately.
        if (isCopyBlock)
        {
            src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/);
        }

        // Set the lhs and rhs on the assignment.
        if (dest != tree->AsOp()->gtOp1)
        {
            asg->AsOp()->gtOp1 = dest;
        }
        if (src != asg->AsOp()->gtOp2)
        {
            asg->AsOp()->gtOp2 = src;
        }

        asg->ChangeType(asgType);
        dest->gtFlags |= GTF_DONT_CSE;
        asg->gtFlags &= ~GTF_EXCEPT;
        asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT);
        // Un-set GTF_REVERSE_OPS, and it will be set later if appropriate.
        asg->gtFlags &= ~GTF_REVERSE_OPS;

#ifdef DEBUG
        if (verbose)
        {
            printf("fgMorphOneAsgBlock (after):\n");
            gtDispTree(tree);
        }
#endif
        return tree;
    }

    return nullptr;
}

//------------------------------------------------------------------------
// fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree
// to a tree of promoted field initialization assignments.
//
// Arguments:
//    destLclNode - The destination LclVar node
//    initVal - The initialization value
//    blockSize - The amount of bytes to initialize
//
// Return Value:
//    A tree that performs field by field initialization of the destination
//    struct variable if various conditions are met, nullptr otherwise.
//
// Notes:
//    This transforms a single block initialization assignment like:
//
//    *  ASG       struct (init)
//    +--*  BLK(12)   struct
//    |  \--*  ADDR      long
//    |     \--*  LCL_VAR   struct(P) V02 loc0
//    |     \--*    int    V02.a (offs=0x00) -> V06 tmp3
//    |     \--*    ubyte  V02.c (offs=0x04) -> V07 tmp4
//    |     \--*    float  V02.d (offs=0x08) -> V08 tmp5
//    \--*  INIT_VAL  int
//       \--*  CNS_INT   int    42
//
//    into a COMMA tree of assignments that initialize each promoted struct
//    field:
//
//    *  COMMA     void
//    +--*  COMMA     void
//    |  +--*  ASG       int
//    |  |  +--*  LCL_VAR   int    V06 tmp3
//    |  |  \--*  CNS_INT   int    0x2A2A2A2A
//    |  \--*  ASG       ubyte
//    |     +--*  LCL_VAR   ubyte  V07 tmp4
//    |     \--*  CNS_INT   int    42
//    \--*  ASG       float
//       +--*  LCL_VAR   float  V08 tmp5
//       \--*  CNS_DBL   float  1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
    assert(destLclNode->OperIs(GT_LCL_VAR));

    LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
    assert(varTypeIsStruct(destLclVar->TypeGet()));
    assert(destLclVar->lvPromoted);

    if (blockSize == 0)
    {
        JITDUMP(" size is zero or unknown.\n");
        return nullptr;
    }

    if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
    {
        JITDUMP(" dest is address exposed and contains holes.\n");
        return nullptr;
    }

    if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
    {
        // TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
        JITDUMP(" dest has custom layout and contains holes.\n");
        return nullptr;
    }

    if (destLclVar->lvExactSize != blockSize)
    {
        JITDUMP(" dest size mismatch.\n");
        return nullptr;
    }

    if (!initVal->OperIs(GT_CNS_INT))
    {
        JITDUMP(" source is not constant.\n");
        return nullptr;
    }

    const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;

    if (initPattern != 0)
    {
        for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
        {
            LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);

            if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
            {
                // Cannot initialize GC or SIMD types with a non-zero constant.
                // The former is completely bogus. The latter restriction could be
                // lifted by supporting non-zero SIMD constants or by generating
                // field initialization code that converts an integer constant to
                // the appropriate SIMD value. Unlikely to be very useful, though.
                JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n");
                return nullptr;
            }
        }
    }

    JITDUMP(" using field by field initialization.\n");

    GenTree* tree = nullptr;

    for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
    {
        unsigned   fieldLclNum = destLclVar->lvFieldLclStart + i;
        LclVarDsc* fieldDesc   = lvaGetDesc(fieldLclNum);
        GenTree*   dest        = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet());
        // If it had been labeled a "USEASG", assignments to the individual promoted fields are not.
        dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG));

        GenTree* src;

        switch (dest->TypeGet())
        {
            case TYP_BOOL:
            case TYP_BYTE:
            case TYP_UBYTE:
            case TYP_SHORT:
            case TYP_USHORT:
                // Promoted fields are expected to be "normalize on load". If that changes then
                // we may need to adjust this code to widen the constant correctly.
                assert(fieldDesc->lvNormalizeOnLoad());
                FALLTHROUGH;
            case TYP_INT:
            {
                int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1;
                src          = gtNewIconNode(static_cast<int32_t>(initPattern & mask));
                break;
            }
            case TYP_LONG:
                src = gtNewLconNode(initPattern);
                break;
            case TYP_FLOAT:
                float floatPattern;
                memcpy(&floatPattern, &initPattern, sizeof(floatPattern));
                src = gtNewDconNode(floatPattern, dest->TypeGet());
                break;
            case TYP_DOUBLE:
                double doublePattern;
                memcpy(&doublePattern, &initPattern, sizeof(doublePattern));
                src = gtNewDconNode(doublePattern, dest->TypeGet());
                break;
            case TYP_REF:
            case TYP_BYREF:
#ifdef FEATURE_SIMD
            case TYP_SIMD8:
            case TYP_SIMD12:
            case TYP_SIMD16:
            case TYP_SIMD32:
#endif // FEATURE_SIMD
                assert(initPattern == 0);
                src = gtNewIconNode(0, dest->TypeGet());
                break;
            default:
                unreached();
        }

        GenTree* asg = gtNewAssignNode(dest, src);

        if (optLocalAssertionProp)
        {
            optAssertionGen(asg);
        }

        if (tree != nullptr)
        {
            tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
        }
        else
        {
            tree = asg;
        }
    }

    return tree;
}

//------------------------------------------------------------------------
// fgMorphGetStructAddr: Gets the address of a struct object
//
// Arguments:
//    pTree    - the parent's pointer to the struct object node
//    clsHnd   - the class handle for the struct type
//    isRValue - true if this is a source (not dest)
//
// Return Value:
//    Returns the address of the struct value, possibly modifying the existing tree to
//    sink the address below any comma nodes (this is to canonicalize for value numbering).
//    If this is a source, it will morph it to a GT_IND before taking its address,
//    since it may not be remorphed (and we don't want blk nodes as rvalues).

GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue)
{
    GenTree* addr;
    GenTree* tree = *pTree;
    // If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we
    // need to hang onto that for the purposes of value numbering.
    if (tree->OperIsIndir())
    {
        if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
        {
            addr = tree->AsOp()->gtOp1;
        }
        else
        {
            if (isRValue && tree->OperIsBlk())
            {
                tree->ChangeOper(GT_IND);
            }
            addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
        }
    }
    else if (tree->gtOper == GT_COMMA)
    {
        // If this is a comma, we're going to "sink" the GT_ADDR below it.
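        // i.e. ADDR(COMMA(e1, e2)) is rewritten as COMMA(e1, ADDR(e2)), retyping the comma to TYP_BYREF.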
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue); tree->gtType = TYP_BYREF; addr = tree; } else { switch (tree->gtOper) { case GT_LCL_FLD: case GT_LCL_VAR: case GT_INDEX: case GT_FIELD: case GT_ARR_ELEM: addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); break; case GT_INDEX_ADDR: addr = tree; break; default: { // TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're // not going to use "temp" GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd); unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum(); lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr)); addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue); break; } } } *pTree = addr; return addr; } //------------------------------------------------------------------------ // fgMorphBlockOperand: Canonicalize an operand of a block assignment // // Arguments: // tree - The block operand // asgType - The type of the assignment // blockWidth - The size of the block // isBlkReqd - true iff this operand must remain a block node // // Return Value: // Returns the morphed block operand // // Notes: // This does the following: // - Ensures that a struct operand is a block node or lclVar. // - Ensures that any COMMAs are above ADDR nodes. // Although 'tree' WAS an operand of a block assignment, the assignment // may have been retyped to be a scalar assignment. GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd) { GenTree* effectiveVal = tree->gtEffectiveVal(); if (asgType != TYP_STRUCT) { if (effectiveVal->OperIsIndir()) { if (!isBlkReqd) { GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType)) { effectiveVal = addr->gtGetOp1(); } else if (effectiveVal->OperIsBlk()) { effectiveVal->SetOper(GT_IND); } } effectiveVal->gtType = asgType; } else if (effectiveVal->TypeGet() != asgType) { if (effectiveVal->IsCall()) { #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } else { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); effectiveVal = gtNewIndir(asgType, addr); } } } else { GenTreeIndir* indirTree = nullptr; GenTreeLclVarCommon* lclNode = nullptr; bool needsIndirection = true; if (effectiveVal->OperIsIndir()) { indirTree = effectiveVal->AsIndir(); GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR)) { lclNode = addr->gtGetOp1()->AsLclVarCommon(); } } else if (effectiveVal->OperGet() == GT_LCL_VAR) { lclNode = effectiveVal->AsLclVarCommon(); } else if (effectiveVal->IsCall()) { needsIndirection = false; #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } #ifdef TARGET_ARM64 else if (effectiveVal->OperIsHWIntrinsic()) { needsIndirection = false; #ifdef DEBUG GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic(); assert(intrinsic->TypeGet() == TYP_STRUCT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())); #endif } #endif // TARGET_ARM64 if (lclNode != nullptr) { const LclVarDsc* varDsc = lvaGetDesc(lclNode); if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType)) { if (effectiveVal != lclNode) { JITDUMP("Replacing block node [%06d] with lclVar 
V%02u\n", dspTreeID(tree), lclNode->GetLclNum());
                    effectiveVal = lclNode;
                }
                needsIndirection = false;
            }
            else
            {
                // This may be a lclVar that was determined to be address-exposed.
                effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT);
            }
        }
        if (needsIndirection)
        {
            if (indirTree != nullptr)
            {
                // If we have an indirection and a block is required, it should already be a block.
                assert(indirTree->OperIsBlk() || !isBlkReqd);
                effectiveVal->gtType = asgType;
            }
            else
            {
                GenTree* newTree;
                GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
                if (isBlkReqd)
                {
                    CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal);
                    if (clsHnd == NO_CLASS_HANDLE)
                    {
                        newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth));
                    }
                    else
                    {
                        newTree = gtNewObjNode(clsHnd, addr);
                        gtSetObjGcInfo(newTree->AsObj());
                    }
                }
                else
                {
                    newTree = gtNewIndir(asgType, addr);
                }
                effectiveVal = newTree;
            }
        }
    }
    assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal)));
    tree = effectiveVal;
    return tree;
}

//------------------------------------------------------------------------
// fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields.
//
// Arguments:
//    lclNum1 - a promoted lclVar that is used in fieldwise assignment;
//    lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM.
//
// Return Value:
//    True if the second local is valid and has the same struct handle as the first,
//    false otherwise.
//
// Notes:
//    This check is needed to avoid accessing LCL_VARs with incorrect
//    CORINFO_FIELD_HANDLE that would confuse VN optimizations.
//
bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2)
{
    assert(lclNum1 != BAD_VAR_NUM);
    if (lclNum2 == BAD_VAR_NUM)
    {
        return false;
    }
    const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1);
    const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2);
    assert(varTypeIsStruct(varDsc1));
    if (!varTypeIsStruct(varDsc2))
    {
        return false;
    }
    CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd();
    CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd();
    assert(struct1 != NO_CLASS_HANDLE);
    assert(struct2 != NO_CLASS_HANDLE);
    if (struct1 != struct2)
    {
        return false;
    }
    return true;
}

// Insert conversions and normalize to make the tree amenable to register FP architectures
GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
{
    if (tree->OperIsArithmetic())
    {
        if (varTypeIsFloating(tree))
        {
            GenTree* op1 = tree->AsOp()->gtOp1;
            GenTree* op2 = tree->gtGetOp2();

            assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet()));

            if (op1->TypeGet() != tree->TypeGet())
            {
                tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet());
            }
            if (op2->TypeGet() != tree->TypeGet())
            {
                tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet());
            }
        }
    }
    else if (tree->OperIsCompare())
    {
        GenTree* op1 = tree->AsOp()->gtOp1;

        if (varTypeIsFloating(op1))
        {
            GenTree* op2 = tree->gtGetOp2();
            assert(varTypeIsFloating(op2));

            if (op1->TypeGet() != op2->TypeGet())
            {
                // both had better be floating, just one bigger than the other
                if (op1->TypeGet() == TYP_FLOAT)
                {
                    assert(op2->TypeGet() == TYP_DOUBLE);
                    tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
                }
                else if (op2->TypeGet() == TYP_FLOAT)
                {
                    assert(op1->TypeGet() == TYP_DOUBLE);
                    tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
                }
            }
        }
    }

    return tree;
}

#ifdef FEATURE_SIMD
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
//   Check whether the field belongs to a SIMD struct. If it does, return the GenTree* for
//   the struct node, along with its base type, field index, and SIMD size. If it does not,
//   just return nullptr. Normally, if the tree node comes from a SIMD lclvar that is not used
//   in any SIMD intrinsic, nullptr is returned, since in that case the SIMD struct should be
//   treated as a regular struct. However, if the SIMD struct node is wanted regardless, set
//   ignoreUsedInSIMDIntrinsic to true; the IsUsedInSIMDIntrinsic check is then skipped and the
//   SIMD struct node is returned whenever the struct is a SIMD struct.
//
// Arguments:
//   tree - GenTree*. This node will be checked to see whether it is a field that belongs to a SIMD
//          struct used for a SIMD intrinsic or not.
//   simdBaseJitTypeOut - CorInfoType pointer. If the tree node is the tree we want, *simdBaseJitTypeOut
//          is set to the SIMD lclvar's base JIT type.
//   indexOut - unsigned pointer. If the tree is used for a SIMD intrinsic, *indexOut is set
//          to the index number of this field.
//   simdSizeOut - unsigned pointer. If the tree is used for a SIMD intrinsic, *simdSizeOut is set
//          to the size of the SIMD struct this tree belongs to.
//   ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
//          the UsedInSIMDIntrinsic check.
//
// Return Value:
//   A GenTree* pointing to the SIMD lclvar tree that the field belongs to. If the tree is not a
//   SIMD-intrinsic-related field, return nullptr.
//
GenTree* Compiler::getSIMDStructFromField(GenTree*     tree,
                                          CorInfoType* simdBaseJitTypeOut,
                                          unsigned*    indexOut,
                                          unsigned*    simdSizeOut,
                                          bool         ignoreUsedInSIMDIntrinsic /*false*/)
{
    GenTree* ret = nullptr;
    if (tree->OperGet() == GT_FIELD)
    {
        GenTree* objRef = tree->AsField()->GetFldObj();
        if (objRef != nullptr)
        {
            GenTree* obj = nullptr;
            if (objRef->gtOper == GT_ADDR)
            {
                obj = objRef->AsOp()->gtOp1;
            }
            else if (ignoreUsedInSIMDIntrinsic)
            {
                obj = objRef;
            }
            else
            {
                return nullptr;
            }

            if (isSIMDTypeLocal(obj))
            {
                LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
                if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
                {
                    *simdSizeOut        = varDsc->lvExactSize;
                    *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
                    ret                 = obj;
                }
            }
            else if (obj->OperGet() == GT_SIMD)
            {
                ret                   = obj;
                GenTreeSIMD* simdNode = obj->AsSIMD();
                *simdSizeOut          = simdNode->GetSimdSize();
                *simdBaseJitTypeOut   = simdNode->GetSimdBaseJitType();
            }
#ifdef FEATURE_HW_INTRINSICS
            else if (obj->OperIsHWIntrinsic())
            {
                ret                          = obj;
                GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
                *simdSizeOut                 = simdNode->GetSimdSize();
                *simdBaseJitTypeOut          = simdNode->GetSimdBaseJitType();
            }
#endif // FEATURE_HW_INTRINSICS
        }
    }
    if (ret != nullptr)
    {
        var_types fieldType = tree->TypeGet();
        if (fieldType == TYP_LONG)
        {
            // Vector2/3/4 expose public float fields while Vector<T>
            // and Vector64/128/256<T> have internal ulong fields. So
            // we should only ever encounter accesses for TYP_FLOAT or
            // TYP_LONG and in the case of the latter we don't want the
            // generic type since we are executing some algorithm on the
            // raw underlying bits instead.
            *simdBaseJitTypeOut = CORINFO_TYPE_ULONG;
        }
        else
        {
            assert(fieldType == TYP_FLOAT);
        }

        unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
        *indexOut             = tree->AsField()->gtFldOffset / baseTypeSize;
    }
    return ret;
}

/*****************************************************************************
 *  If a read operation tries to access a SIMD struct field, then transform the operation
 *  to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree.
 *  Argument:
 *   tree - GenTree*. If this pointer points to a SIMD struct which is used for a SIMD
 *          intrinsic, we will morph it to the SIMD intrinsic NI_Vector128_GetElement.
 *  Return:
 *   A GenTree* which points to the new tree. If the tree is not a SIMD-intrinsic
 *   field access, return the original tree.
 */

GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
    unsigned    index           = 0;
    CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
    unsigned    simdSize        = 0;
    GenTree*    simdStructNode  = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);

    if (simdStructNode != nullptr)
    {
        var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
        GenTree*  op2          = gtNewIconNode(index, TYP_INT);

        assert(simdSize <= 16);
        assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));

        tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
                                       /* isSimdAsHWIntrinsic */ true);
    }
    return tree;
}

/*****************************************************************************
 *  Transform an assignment of a SIMD struct field to SimdWithElementNode, and
 *  return a new tree. If it is not such an assignment, then return the old tree.
 *  Argument:
 *   tree - GenTree*. If this pointer points to a SIMD struct which is used for a SIMD
 *          intrinsic, we will morph it as a SIMD intrinsic set.
 *  Return:
 *   A GenTree* which points to the new tree. If the tree is not such an assignment,
 *   return the original tree.
 */

GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
    assert(tree->OperGet() == GT_ASG);

    unsigned    index           = 0;
    CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
    unsigned    simdSize        = 0;
    GenTree*    simdStructNode  = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);

    if (simdStructNode != nullptr)
    {
        var_types simdType     = simdStructNode->gtType;
        var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);

        assert(simdSize <= 16);
        assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));

        GenTree* op2 = gtNewIconNode(index, TYP_INT);
        GenTree* op3 = tree->gtGetOp2();
        NamedIntrinsic intrinsicId = NI_Vector128_WithElement;

        GenTree* target = gtClone(simdStructNode);
        assert(target != nullptr);

        GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
                                                     /* isSimdAsHWIntrinsic */ true);

        tree->AsOp()->gtOp1 = target;
        tree->AsOp()->gtOp2 = simdTree;

        // fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source
        // and target have not yet been morphed.
        // Therefore, in case the source and/or target are now implicit byrefs, we need to call it again.
        if (fgMorphImplicitByRefArgs(tree))
        {
            if (tree->gtGetOp1()->OperIsBlk())
            {
                assert(tree->gtGetOp1()->TypeGet() == simdType);
                tree->gtGetOp1()->SetOper(GT_IND);
                tree->gtGetOp1()->gtType = simdType;
            }
        }

#ifdef DEBUG
        tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
    }

    return tree;
}
#endif // FEATURE_SIMD

//------------------------------------------------------------------------------
// fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3"
//                      for commutative operators.
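//                      e.g. "(x + 4) + 8" becomes "x + 12", with the two constants folded into one.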
// // Arguments: // tree - node to fold // // return value: // A folded GenTree* instance or nullptr if something prevents folding. // GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree) { assert(varTypeIsIntegralOrI(tree->TypeGet())); assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR)); // op1 can be GT_COMMA, in this case we're going to fold // "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))" GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true); genTreeOps oper = tree->OperGet(); if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() || op1->gtGetOp1()->IsCnsIntOrI()) { return nullptr; } if (!fgGlobalMorph && (op1 != tree->gtGetOp1())) { // Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1))) // don't run the optimization for such trees outside of global morph. // Otherwise, there is a chance of violating VNs invariants and/or modifying a tree // that is an active CSE candidate. return nullptr; } if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1)) { // The optimization removes 'tree' from IR and changes the value of 'op1'. return nullptr; } if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow())) { return nullptr; } GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon(); GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon(); if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet())) { return nullptr; } if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2)) { // The optimization removes 'cns2' from IR and changes the value of 'cns1'. return nullptr; } GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2)); if (!folded->IsCnsIntOrI()) { // Give up if we can't fold "C1 op C2" return nullptr; } auto foldedCns = folded->AsIntCon(); cns1->SetIconValue(foldedCns->IconValue()); cns1->SetVNsFromNode(foldedCns); cns1->gtFieldSeq = foldedCns->gtFieldSeq; op1 = tree->gtGetOp1(); op1->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(cns2); DEBUG_DESTROY_NODE(foldedCns); INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1->AsOp(); } //------------------------------------------------------------------------------ // fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)". // // Arguments: // tree - node to fold // // Return Value: // A folded GenTree* instance, or nullptr if it couldn't be folded GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree) { // This transform does not preserve VNs and deletes a node. assert(fgGlobalMorph); assert(varTypeIsIntegralOrI(tree)); assert(tree->OperIs(GT_OR, GT_AND, GT_XOR)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); // see whether both ops are casts, with matching to and from types. if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST)) { // bail if either operand is a checked cast if (op1->gtOverflow() || op2->gtOverflow()) { return nullptr; } var_types fromType = op1->AsCast()->CastOp()->TypeGet(); var_types toType = op1->AsCast()->CastToType(); bool isUnsigned = op1->IsUnsigned(); if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) || (op2->IsUnsigned() != isUnsigned)) { return nullptr; } /* // Reuse gentree nodes: // // tree op1 // / \ | // op1 op2 ==> tree // | | / \. 
// x y x y // // (op2 becomes garbage) */ tree->gtOp1 = op1->AsCast()->CastOp(); tree->gtOp2 = op2->AsCast()->CastOp(); tree->gtType = genActualType(fromType); op1->gtType = genActualType(toType); op1->AsCast()->gtOp1 = tree; op1->AsCast()->CastToType() = toType; op1->SetAllEffectsFlags(tree); // no need to update isUnsigned DEBUG_DESTROY_NODE(op2); INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1; } return nullptr; } /***************************************************************************** * * Transform the given GTK_SMPOP tree for code generation. */ #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) { ALLOCA_CHECK(); assert(tree->OperKind() & GTK_SMPOP); /* The steps in this function are : o Perform required preorder processing o Process the first, then second operand, if any o Perform required postorder morphing o Perform optional postorder morphing if optimizing */ bool isQmarkColon = false; AssertionIndex origAssertionCount = DUMMY_INIT(0); AssertionDsc* origAssertionTab = DUMMY_INIT(NULL); AssertionIndex thenAssertionCount = DUMMY_INIT(0); AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL); if (fgGlobalMorph) { tree = fgMorphForRegisterFP(tree); } genTreeOps oper = tree->OperGet(); var_types typ = tree->TypeGet(); GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2IfPresent(); /*------------------------------------------------------------------------- * First do any PRE-ORDER processing */ switch (oper) { // Some arithmetic operators need to use a helper call to the EE int helper; case GT_ASG: tree = fgDoNormalizeOnStore(tree); /* fgDoNormalizeOnStore can change op2 */ noway_assert(op1 == tree->AsOp()->gtOp1); op2 = tree->AsOp()->gtOp2; #ifdef FEATURE_SIMD if (IsBaselineSimdIsaSupported()) { // We should check whether op2 should be assigned to a SIMD field or not. // If it is, we should tranlate the tree to simd intrinsic. assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0)); GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree); typ = tree->TypeGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); #ifdef DEBUG assert((tree == newTree) && (tree->OperGet() == oper)); if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0) { tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; } #endif // DEBUG } #endif // We can't CSE the LHS of an assignment. Only r-values can be CSEed. // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former // behavior, allow CSE'ing if is a struct type (or a TYP_REF transformed from a struct type) // TODO-1stClassStructs: improve this. if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_ADDR: /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */ op1->gtFlags |= GTF_DONT_CSE; break; case GT_QMARK: case GT_JTRUE: noway_assert(op1); if (op1->OperIsCompare()) { /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does not need to materialize the result as a 0 or 1. */ /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } else { GenTree* effOp1 = op1->gtEffectiveVal(); noway_assert((effOp1->gtOper == GT_CNS_INT) && (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1))); } break; case GT_COLON: if (optLocalAssertionProp) { isQmarkColon = true; } break; case GT_FIELD: return fgMorphField(tree, mac); case GT_INDEX: return fgMorphArrayIndex(tree); case GT_CAST: { GenTree* morphedCast = fgMorphExpandCast(tree->AsCast()); if (morphedCast != nullptr) { return morphedCast; } op1 = tree->AsCast()->CastOp(); } break; case GT_MUL: noway_assert(op2 != nullptr); if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow()) { // MUL(NEG(a), C) => MUL(a, NEG(C)) if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() && !op2->IsIconHandle()) { GenTree* newOp1 = op1->gtGetOp1(); GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet()); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); tree->AsOp()->gtOp1 = newOp1; tree->AsOp()->gtOp2 = newConst; return fgMorphSmpOp(tree, mac); } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { // For (long)int1 * (long)int2, we dont actually do the // casts, and just multiply the 32 bit values, which will // give us the 64 bit result in edx:eax. if (tree->Is64RsltMul()) { // We are seeing this node again. // Morph only the children of casts, // so as to avoid losing them. tree = fgMorphLongMul(tree->AsOp()); goto DONE_MORPHING_CHILDREN; } tree = fgRecognizeAndMorphLongMul(tree->AsOp()); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->AsOp()->gtGetOp2(); if (tree->Is64RsltMul()) { goto DONE_MORPHING_CHILDREN; } else { if (tree->gtOverflow()) helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF; else helper = CORINFO_HELP_LMUL; goto USE_HELPER_FOR_ARITH; } } #endif // !TARGET_64BIT break; case GT_ARR_LENGTH: if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return iconNode; } } break; case GT_DIV: // Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two. 
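            // For example (illustrative), "x / 2.0" becomes "x * 0.5".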
// Powers of two within range are always exactly represented, // so multiplication by the reciprocal is safe in this scenario if (fgGlobalMorph && op2->IsCnsFltOrDbl()) { double divisor = op2->AsDblCon()->gtDconVal; if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) || ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor)))) { oper = GT_MUL; tree->ChangeOper(oper); op2->AsDblCon()->gtDconVal = 1.0 / divisor; } } // Convert DIV to UDIV if boths op1 and op2 are known to be never negative if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) && op2->IsNeverNegative(this)) { assert(tree->OperIs(GT_DIV)); tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN); return fgMorphSmpOp(tree, mac); } #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = CORINFO_HELP_LDIV; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { helper = CORINFO_HELP_DIV; goto USE_HELPER_FOR_ARITH; } #endif #endif // !TARGET_64BIT break; case GT_UDIV: #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = CORINFO_HELP_ULDIV; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { helper = CORINFO_HELP_UDIV; goto USE_HELPER_FOR_ARITH; } #endif #endif // TARGET_64BIT break; case GT_MOD: if (varTypeIsFloating(typ)) { helper = CORINFO_HELP_DBLREM; noway_assert(op2); if (op1->TypeGet() == TYP_FLOAT) { if (op2->TypeGet() == TYP_FLOAT) { helper = CORINFO_HELP_FLTREM; } else { tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } else if (op2->TypeGet() == TYP_FLOAT) { tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } goto USE_HELPER_FOR_ARITH; } // Convert MOD to UMOD if boths op1 and op2 are known to be never negative if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) && op2->IsNeverNegative(this)) { assert(tree->OperIs(GT_MOD)); tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN); return fgMorphSmpOp(tree, mac); } // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod. // A similar optimization for signed mod will not work for a negative perfectly divisible // HI-word. To make it correct, we would need to divide without the sign and then flip the // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline. goto ASSIGN_HELPER_FOR_MOD; case GT_UMOD: #ifdef TARGET_ARMARCH // // Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization // #else // TARGET_XARCH // If this is an unsigned long mod with a constant divisor, // then don't morph to a helper call - it can be done faster inline using idiv. noway_assert(op2); if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD)) { if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 && op2->AsIntConCommon()->LngValue() <= 0x3fffffff) { tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1); noway_assert(op1->TypeIs(TYP_LONG)); // Update flags for op1 morph. tree->gtFlags &= ~GTF_ALL_EFFECT; // Only update with op1 as op2 is a constant. tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // If op1 is a constant, then do constant folding of the division operator. 
if (op1->OperIs(GT_CNS_NATIVELONG)) { tree = gtFoldExpr(tree); } if (!tree->OperIsConst()) { tree->AsOp()->CheckDivideByConstOptimized(this); } return tree; } } #endif // TARGET_XARCH ASSIGN_HELPER_FOR_MOD: // For "val % 1", return 0 if op1 doesn't have any side effects // and we are not in the CSE phase, we cannot discard 'tree' // because it may contain CSE expressions that we haven't yet examined. // if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase) { if (op2->IsIntegralConst(1)) { GenTree* zeroNode = gtNewZeroConNode(typ); #ifdef DEBUG zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif DEBUG_DESTROY_NODE(tree); return zeroNode; } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { if (oper == GT_UMOD) { helper = CORINFO_HELP_UMOD; goto USE_HELPER_FOR_ARITH; } else if (oper == GT_MOD) { helper = CORINFO_HELP_MOD; goto USE_HELPER_FOR_ARITH; } } #endif #endif // !TARGET_64BIT #ifdef TARGET_ARM64 // For ARM64 we don't have a remainder instruction, // The architecture manual suggests the following transformation to // generate code for such operator: // // a % b = a - (a / b) * b; // // TODO: there are special cases where it can be done better, for example // when the modulo operation is unsigned and the divisor is a // integer constant power of two. In this case, we can make the transform: // // a % b = a & (b - 1); // // Lower supports it for all cases except when `a` is constant, but // in Morph we can't guarantee that `a` won't be transformed into a constant, // so can't guarantee that lower will be able to do this optimization. { // Do "a % b = a - (a / b) * b" morph always, see TODO before this block. bool doMorphModToSubMulDiv = true; if (doMorphModToSubMulDiv) { assert(!optValnumCSE_phase); tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #else // !TARGET_ARM64 // If b is not a power of 2 constant then lowering replaces a % b // with a - (a / b) * b and applies magic division optimization to // a / b. The code may already contain an a / b expression (e.g. // x = a / 10; y = a % 10;) and then we end up with redundant code. // If we convert % to / here we give CSE the opportunity to eliminate // the redundant division. If there's no redundant division then // nothing is lost, lowering would have done this transform anyway. if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst())) { ssize_t divisorValue = op2->AsIntCon()->IconValue(); size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue)); if (!isPow2(absDivisorValue)) { tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #endif // !TARGET_ARM64 break; USE_HELPER_FOR_ARITH: { // TODO: this comment is wrong now, do an appropriate fix. /* We have to morph these arithmetic operations into helper calls before morphing the arguments (preorder), else the arguments won't get correct values of fgPtrArgCntCur. However, try to fold the tree first in case we end up with a simple node which won't need a helper call at all */ noway_assert(tree->OperIsBinary()); GenTree* oldTree = tree; tree = gtFoldExpr(tree); // Were we able to fold it ? // Note that gtFoldExpr may return a non-leaf even if successful // e.g. 
for something like "expr / 1" - see also bug #290853 if (tree->OperIsLeaf() || (oldTree != tree)) { return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree); } // Did we fold it into a comma node with throw? if (tree->gtOper == GT_COMMA) { noway_assert(fgIsCommaThrow(tree)); return fgMorphTree(tree); } } return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2)); case GT_RETURN: if (!tree->TypeIs(TYP_VOID)) { if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND)) { op1 = fgMorphRetInd(tree->AsUnOp()); } if (op1->OperIs(GT_LCL_VAR)) { // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)` // and `ASG` will be tranformed into field by field copy without parent local referencing if // possible. GenTreeLclVar* lclVar = op1->AsLclVar(); unsigned lclNum = lclVar->GetLclNum(); if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum)) { LclVarDsc* varDsc = lvaGetDesc(lclVar); if (varDsc->CanBeReplacedWithItsField(this)) { // We can replace the struct with its only field and allow copy propagation to replace // return value that was written as a field. unsigned fieldLclNum = varDsc->lvFieldLclStart; LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field " "V%02u for " "the return [%06u]\n", lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree)); lclVar->SetLclNum(fieldLclNum); lclVar->ChangeType(fieldDsc->lvType); } } } } // normalize small integer return values if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) && fgCastNeeded(op1, info.compRetType)) { // Small-typed return values are normalized by the callee op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType); // Propagate GTF_COLON_COND op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND); tree->AsOp()->gtOp1 = fgMorphTree(op1); // Propagate side effect flags tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1()); return tree; } break; case GT_EQ: case GT_NE: { GenTree* optimizedTree = gtFoldTypeCompare(tree); if (optimizedTree != tree) { return fgMorphTree(optimizedTree); } // Pattern-matching optimization: // (a % c) ==/!= 0 // for power-of-2 constant `c` // => // a & (c - 1) ==/!= 0 // For integer `a`, even if negative. 
if (opts.OptimizationEnabled() && !optValnumCSE_phase) { assert(tree->OperIs(GT_EQ, GT_NE)); if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0)) { GenTree* op1op2 = op1->AsOp()->gtOp2; if (op1op2->IsCnsIntOrI()) { const ssize_t modValue = op1op2->AsIntCon()->IconValue(); if (isPow2(modValue)) { JITDUMP("\nTransforming:\n"); DISPTREE(tree); op1->SetOper(GT_AND); // Change % => & op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1 fgUpdateConstTreeValueNumber(op1op2); JITDUMP("\ninto:\n"); DISPTREE(tree); } } } } } FALLTHROUGH; case GT_GT: { // Try and optimize nullable boxes feeding compares GenTree* optimizedTree = gtFoldBoxNullable(tree); if (optimizedTree->OperGet() != tree->OperGet()) { return optimizedTree; } else { tree = optimizedTree; } op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); break; } case GT_RUNTIMELOOKUP: return fgMorphTree(op1); #ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round) { switch (tree->TypeGet()) { case TYP_DOUBLE: return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1)); case TYP_FLOAT: return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1)); default: unreached(); } } break; #endif case GT_PUTARG_TYPE: return fgMorphTree(tree->AsUnOp()->gtGetOp1()); case GT_NULLCHECK: { op1 = tree->AsUnOp()->gtGetOp1(); if (op1->IsCall()) { GenTreeCall* const call = op1->AsCall(); if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd))) { JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call)); // TODO: Can we also remove the call? // return fgMorphTree(call); } } } break; default: break; } if (opts.OptimizationEnabled() && fgGlobalMorph) { GenTree* morphed = fgMorphReduceAddOps(tree); if (morphed != tree) return fgMorphTree(morphed); } /*------------------------------------------------------------------------- * Process the first operand, if any */ if (op1) { // If we are entering the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); origAssertionTab = (AssertionDsc*)ALLOCA(tabSize); origAssertionCount = optAssertionCount; memcpy(origAssertionTab, optAssertionTabPrivate, tabSize); } else { origAssertionCount = 0; origAssertionTab = nullptr; } } // We might need a new MorphAddressContext context. (These are used to convey // parent context about how addresses being calculated will be used; see the // specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. MorphAddrContext subIndMac1(MACK_Ind); MorphAddrContext* subMac1 = mac; if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind) { switch (tree->gtOper) { case GT_ADDR: // A non-null mac here implies this node is part of an address computation. // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; subMac1->m_kind = MACK_Addr; } break; case GT_COMMA: // In a comma, the incoming context only applies to the rightmost arg of the // comma list. The left arg (op1) gets a fresh context. 
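                    // For example (illustrative), in IND(COMMA(sideEff, addrExpr)) only
                    // `addrExpr` participates in the enclosing address computation;
                    // `sideEff` is evaluated purely for its effects under a fresh context.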
subMac1 = nullptr; break; case GT_OBJ: case GT_BLK: case GT_IND: // A non-null mac here implies this node is part of an address computation (the tree parent is // GT_ADDR). // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; } break; default: break; } } // For additions, if we're in an IND context keep track of whether // all offsets added to the address are constant, and their sum. if (tree->gtOper == GT_ADD && subMac1 != nullptr) { assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock. GenTree* otherOp = tree->AsOp()->gtOp2; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset); totalOffset += otherOp->AsIntConCommon()->IconValue(); if (totalOffset.IsOverflow()) { // We will consider an offset so large as to overflow as "not a constant" -- // we will do a null check. subMac1->m_allConstantOffsets = false; } else { subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } } else { subMac1->m_allConstantOffsets = false; } } // If op1 is a GT_FIELD or indir, we need to pass down the mac if // its parent is GT_ADDR, since the address of op1 // is part of an ongoing address computation. Otherwise // op1 represents the value of the field and so any address // calculations it does are in a new context. if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR)) { subMac1 = nullptr; // The impact of op1's value to any ongoing // address computation is handled below when looking // at op2. } tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1); // If we are exiting the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can merge this state with the "else" part exit if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize); thenAssertionCount = optAssertionCount; memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize); } else { thenAssertionCount = 0; thenAssertionTab = nullptr; } } /* Morphing along with folding and inlining may have changed the * side effect flags, so we have to reset them * * NOTE: Don't reset the exception flags on nodes that may throw */ assert(tree->gtOper != GT_CALL); if (!tree->OperRequiresCallFlag(this)) { tree->gtFlags &= ~GTF_CALL; } /* Propagate the new flags */ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does // Similarly for clsVar if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR)) { tree->gtFlags &= ~GTF_GLOB_REF; } } // if (op1) /*------------------------------------------------------------------------- * Process the second operand, if any */ if (op2) { // If we are entering the "else" part of a Qmark-Colon we must // reset the state of the current copy assignment table if (isQmarkColon) { noway_assert(optLocalAssertionProp); optAssertionReset(0); if (origAssertionCount) { size_t tabSize = origAssertionCount * sizeof(AssertionDsc); memcpy(optAssertionTabPrivate, origAssertionTab, tabSize); optAssertionReset(origAssertionCount); } } // We might need a new MorphAddressContext context to use in evaluating op2. 
// (These are used to convey parent context about how addresses being calculated // will be used; see the specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. switch (tree->gtOper) { case GT_ADD: if (mac != nullptr && mac->m_kind == MACK_Ind) { GenTree* otherOp = tree->AsOp()->gtOp1; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } else { mac->m_allConstantOffsets = false; } } break; default: break; } // If op2 is a GT_FIELD or indir, we must be taking its value, // so it should evaluate its address in a new context. if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir()) { // The impact of op2's value to any ongoing // address computation is handled above when looking // at op1. mac = nullptr; } tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac); /* Propagate the side effect flags from op2 */ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT); // If we are exiting the "else" part of a Qmark-Colon we must // merge the state of the current copy assignment table with // that of the exit of the "then" part. if (isQmarkColon) { noway_assert(optLocalAssertionProp); // If either exit table has zero entries then // the merged table also has zero entries if (optAssertionCount == 0 || thenAssertionCount == 0) { optAssertionReset(0); } else { size_t tabSize = optAssertionCount * sizeof(AssertionDsc); if ((optAssertionCount != thenAssertionCount) || (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set // Iterate over the copy asgn table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) { AssertionDsc* curAssertion = optGetAssertion(index); for (unsigned j = 0; j < thenAssertionCount; j++) { AssertionDsc* thenAssertion = &thenAssertionTab[j]; // Do the left sides match? if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) && (curAssertion->assertionKind == thenAssertion->assertionKind)) { // Do the right sides match? 
if ((curAssertion->op2.kind == thenAssertion->op2.kind) && (curAssertion->op2.lconVal == thenAssertion->op2.lconVal)) { goto KEEP; } else { goto REMOVE; } } } // // If we fall out of the loop above then we didn't find // any matching entry in the thenAssertionTab so it must // have been killed on that path so we remove it here // REMOVE: // The data at optAssertionTabPrivate[i] is to be removed CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("The QMARK-COLON "); printTreeID(tree); printf(" removes assertion candidate #%d\n", index); } #endif optAssertionRemove(index); continue; KEEP: // The data at optAssertionTabPrivate[i] is to be kept index++; } } } } } // if (op2) #ifndef TARGET_64BIT DONE_MORPHING_CHILDREN: #endif // !TARGET_64BIT if (tree->OperIsIndirOrArrLength()) { tree->SetIndirExceptionFlags(this); } else { if (tree->OperMayThrow(this)) { // Mark the tree node as potentially throwing an exception tree->gtFlags |= GTF_EXCEPT; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0))) { tree->gtFlags &= ~GTF_EXCEPT; } } } if (tree->OperRequiresAsgFlag()) { tree->gtFlags |= GTF_ASG; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0))) { tree->gtFlags &= ~GTF_ASG; } } if (tree->OperRequiresCallFlag(this)) { tree->gtFlags |= GTF_CALL; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0))) { tree->gtFlags &= ~GTF_CALL; } } /*------------------------------------------------------------------------- * Now do POST-ORDER processing */ if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet()))) { // The tree is really not GC but was marked as such. Now that the // children have been unmarked, unmark the tree too. // Remember that GT_COMMA inherits it's type only from op2 if (tree->gtOper == GT_COMMA) { tree->gtType = genActualType(op2->TypeGet()); } else { tree->gtType = genActualType(op1->TypeGet()); } } GenTree* oldTree = tree; GenTree* qmarkOp1 = nullptr; GenTree* qmarkOp2 = nullptr; if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON)) { qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1; qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2; } // Try to fold it, maybe we get lucky, tree = gtFoldExpr(tree); if (oldTree != tree) { /* if gtFoldExpr returned op1 or op2 then we are done */ if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2)) { return tree; } /* If we created a comma-throw tree then we need to morph op1 */ if (fgIsCommaThrow(tree)) { tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1); fgMorphTreeDone(tree); return tree; } return tree; } else if (tree->OperIsConst()) { return tree; } /* gtFoldExpr could have used setOper to change the oper */ oper = tree->OperGet(); typ = tree->TypeGet(); /* gtFoldExpr could have changed op1 and op2 */ op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); // Do we have an integer compare operation? // if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet())) { // Are we comparing against zero? // if (op2->IsIntegralConst(0)) { // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } } /*------------------------------------------------------------------------- * Perform the required oper-specific postorder morphing */ GenTree* temp; size_t ival1; GenTree* lclVarTree; GenTree* effectiveOp1; FieldSeqNode* fieldSeq = nullptr; switch (oper) { case GT_ASG: if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0)) { op1->gtFlags &= ~GTF_VAR_FOLDED_IND; tree = fgDoNormalizeOnStore(tree); op2 = tree->gtGetOp2(); } lclVarTree = fgIsIndirOfAddrOfLocal(op1); if (lclVarTree != nullptr) { lclVarTree->gtFlags |= GTF_VAR_DEF; } effectiveOp1 = op1->gtEffectiveVal(); // If we are storing a small type, we might be able to omit a cast. if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1)) { if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) && varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow()) { var_types castType = op2->CastToType(); // If we are performing a narrowing cast and // castType is larger or the same as op1's type // then we can discard the cast. if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1))) { tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp(); } } } fgAssignSetVarDef(tree); /* We can't CSE the LHS of an assignment */ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_CAST: tree = fgOptimizeCast(tree->AsCast()); if (!tree->OperIsSimple()) { return tree; } if (tree->OperIs(GT_CAST) && tree->gtOverflow()) { fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_EQ: case GT_NE: // It is not safe to reorder/delete CSE's if (!optValnumCSE_phase && op2->IsIntegralConst()) { tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp()); assert(tree->OperIsCompare()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } goto COMPARE; case GT_LT: case GT_LE: case GT_GE: case GT_GT: if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))) { tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } // op2's value may be changed, so it cannot be a CSE candidate. if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2)) { tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp()); oper = tree->OperGet(); assert(op1 == tree->AsOp()->gtGetOp1()); assert(op2 == tree->AsOp()->gtGetOp2()); } COMPARE: noway_assert(tree->OperIsCompare()); break; case GT_MUL: #ifndef TARGET_64BIT if (typ == TYP_LONG) { // This must be GTF_MUL_64RSLT INDEBUG(tree->AsOp()->DebugCheckLongMul()); return tree; } #endif // TARGET_64BIT goto CM_OVF_OP; case GT_SUB: if (tree->gtOverflow()) { goto CM_OVF_OP; } // TODO #4104: there are a lot of other places where // this condition is not checked before transformations. if (fgGlobalMorph) { /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */ noway_assert(op2); if (op2->IsCnsIntOrI() && !op2->IsIconHandle()) { // Negate the constant and change the node to be "+", // except when `op2` is a const byref. 
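                    // For example (illustrative): "x - 7" becomes "x + (-7)", which can
                    // then benefit from the commutative ADD optimizations performed later.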
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue()); op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField(); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */ noway_assert(op1); if (op1->IsCnsIntOrI()) { noway_assert(varTypeIsIntOrI(tree)); // The type of the new GT_NEG node cannot just be op2->TypeGet(). // Otherwise we may sign-extend incorrectly in cases where the GT_NEG // node ends up feeding directly into a cast, for example in // GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte)) tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2); fgMorphTreeDone(op2); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* No match - exit */ } // Skip optimization if non-NEG operand is constant. // Both op1 and op2 are not constant because it was already checked above. if (opts.OptimizationEnabled() && fgGlobalMorph) { // a - -b = > a + b // SUB(a, (NEG(b)) => ADD(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { // tree: SUB // op1: a // op2: NEG // op2Child: b GenTree* op2Child = op2->AsOp()->gtOp1; // b oper = GT_ADD; tree->SetOper(oper, GenTree::PRESERVE_VN); tree->AsOp()->gtOp2 = op2Child; DEBUG_DESTROY_NODE(op2); op2 = op2Child; } // -a - -b = > b - a // SUB(NEG(a), (NEG(b)) => SUB(b, a) else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2)) { // tree: SUB // op1: NEG // op1Child: a // op2: NEG // op2Child: b GenTree* op1Child = op1->AsOp()->gtOp1; // a GenTree* op2Child = op2->AsOp()->gtOp1; // b tree->AsOp()->gtOp1 = op2Child; tree->AsOp()->gtOp2 = op1Child; DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); op1 = op2Child; op2 = op1Child; } } break; #ifdef TARGET_ARM64 case GT_DIV: if (!varTypeIsFloating(tree->gtType)) { // Codegen for this instruction needs to be able to throw two exceptions: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); } break; case GT_UDIV: // Codegen for this instruction needs to be able to throw one exception: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); break; #endif case GT_ADD: CM_OVF_OP: if (tree->gtOverflow()) { tree->gtRequestSetFlags(); // Add the excptn-throwing basic block to jump to on overflow fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); // We can't do any commutative morphing for overflow instructions break; } CM_ADD_OP: FALLTHROUGH; case GT_OR: case GT_XOR: case GT_AND: tree = fgOptimizeCommutativeArithmetic(tree->AsOp()); if (!tree->OperIsSimple()) { return tree; } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_NOT: case GT_NEG: // Remove double negation/not. // Note: this is not a safe tranformation if "tree" is a CSE candidate. // Consider for example the following expression: NEG(NEG(OP)), where any // NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find // the original NEG in the statement. 
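            // For example (illustrative): NEG(NEG(x)) and NOT(NOT(x)) both reduce to x,
            // provided neither node is an active CSE candidate.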
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) && !gtIsActiveCSE_Candidate(op1)) { JITDUMP("Remove double negation/not\n") GenTree* op1op1 = op1->gtGetOp1(); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op1op1; } // Distribute negation over simple multiplication/division expressions if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) && op1->OperIs(GT_MUL, GT_DIV)) { GenTreeOp* mulOrDiv = op1->AsOp(); GenTree* op1op1 = mulOrDiv->gtGetOp1(); GenTree* op1op2 = mulOrDiv->gtGetOp2(); if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle()) { // NEG(MUL(a, C)) => MUL(a, -C) // NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1} ssize_t constVal = op1op2->AsIntCon()->IconValue(); if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) || (mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow())) { GenTree* newOp1 = op1op1; // a GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C mulOrDiv->gtOp1 = newOp1; mulOrDiv->gtOp2 = newOp2; mulOrDiv->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1op2); return mulOrDiv; } } } /* Any constant cases should have been folded earlier */ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase); break; case GT_CKFINITE: noway_assert(varTypeIsFloating(op1->TypeGet())); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN); break; case GT_BOUNDS_CHECK: fgSetRngChkTarget(tree); break; case GT_OBJ: case GT_BLK: case GT_IND: { // If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on // the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X // is a local or CLS_VAR, even if it has been address-exposed. if (op1->OperIs(GT_ADDR)) { tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF); } if (!tree->OperIs(GT_IND)) { break; } // Can not remove a GT_IND if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } bool foldAndReturnTemp = false; temp = nullptr; ival1 = 0; // Don't remove a volatile GT_IND, even if the address points to a local variable. if ((tree->gtFlags & GTF_IND_VOLATILE) == 0) { /* Try to Fold *(&X) into X */ if (op1->gtOper == GT_ADDR) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } temp = op1->AsOp()->gtOp1; // X // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that // they are the *same* struct type. In fact, they almost certainly aren't. If the // address has an associated field sequence, that identifies this case; go through // the "lcl_fld" path rather than this one. FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below. 
if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
                    {
                        foldAndReturnTemp = true;
                    }
                    else if (temp->OperIsLocal())
                    {
                        unsigned   lclNum = temp->AsLclVarCommon()->GetLclNum();
                        LclVarDsc* varDsc = lvaGetDesc(lclNum);

                        // We will try to optimize when we have a promoted struct with a zero lvFldOffset
                        if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
                        {
                            noway_assert(varTypeIsStruct(varDsc));

                            // We will try to optimize when we have a single field struct that is being struct promoted
                            if (varDsc->lvFieldCnt == 1)
                            {
                                unsigned lclNumFld = varDsc->lvFieldLclStart;
                                // just grab the promoted field
                                LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);

                                // Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
                                // is zero
                                if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
                                {
                                    // We can just use the existing promoted field LclNum
                                    temp->AsLclVarCommon()->SetLclNum(lclNumFld);
                                    temp->gtType = fieldVarDsc->TypeGet();

                                    foldAndReturnTemp = true;
                                }
                            }
                        }
                        // If the type of the IND (typ) is a "small int", and the type of the local has the
                        // same width, then we can reduce to just the local variable -- it will be
                        // correctly normalized.
                        //
                        // The below transformation cannot be applied if the local var needs to be normalized on load.
                        else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
                                 !lvaTable[lclNum].lvNormalizeOnLoad())
                        {
                            const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
                            const bool possiblyStore  = !definitelyLoad;

                            if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
                            {
                                typ               = temp->TypeGet();
                                tree->gtType      = typ;
                                foldAndReturnTemp = true;

                                if (possiblyStore)
                                {
                                    // This node can be on the left-hand-side of an assignment node.
                                    // Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
                                    // is called on its parent in post-order morph.
                                    temp->gtFlags |= GTF_VAR_FOLDED_IND;
                                }
                            }
                        }
                        // For matching types we can fold
                        else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
                                 !lvaTable[lclNum].lvNormalizeOnLoad())
                        {
                            tree->gtType = typ = temp->TypeGet();
                            foldAndReturnTemp  = true;
                        }
                        else
                        {
                            // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
                            // nullptr)
                            assert(fieldSeq == nullptr);
                            bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
                            assert(b || fieldSeq == nullptr);

                            if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
                            {
                                // Append the field sequence, change the type.
                                temp->AsLclFld()->SetFieldSeq(
                                    GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
                                temp->gtType = typ;

                                foldAndReturnTemp = true;
                            }
                        }
                        // Otherwise we will fold this into a GT_LCL_FLD below
                        //   where we check (temp != nullptr)
                    }
                    else // !temp->OperIsLocal()
                    {
                        // We don't try to fold away the GT_IND/GT_ADDR for this case
                        temp = nullptr;
                    }
                }
                else if (op1->OperGet() == GT_ADD)
                {
#ifdef TARGET_ARM
                    // Check for a misaligned floating-point indirection.
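                    // For example (illustrative): a TYP_FLOAT load from "addr + 2" is not
                    // 4-byte aligned, so it must be marked GTF_IND_UNALIGNED.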
if (varTypeIsFloating(typ)) { GenTree* addOp2 = op1->AsOp()->gtGetOp2(); if (addOp2->IsCnsIntOrI()) { ssize_t offset = addOp2->AsIntCon()->gtIconVal; if ((offset % emitTypeSize(TYP_FLOAT)) != 0) { tree->gtFlags |= GTF_IND_UNALIGNED; } } } #endif // TARGET_ARM /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */ if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT && opts.OptimizationEnabled()) { // No overflow arithmetic with pointers noway_assert(!op1->gtOverflow()); temp = op1->AsOp()->gtOp1->AsOp()->gtOp1; if (!temp->OperIsLocal()) { temp = nullptr; break; } // Can not remove the GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1)) { break; } ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal; fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq; // Does the address have an associated zero-offset field sequence? FieldSeqNode* addrFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq)) { fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq); } if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT) { noway_assert(!varTypeIsGC(temp->TypeGet())); foldAndReturnTemp = true; } else { // The emitter can't handle large offsets if (ival1 != (unsigned short)ival1) { break; } // The emitter can get confused by invalid offsets if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum())) { break; } } // Now we can fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } } } // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging: // - We may have a load of a local where the load has a different type than the local // - We may have a load of a local plus an offset // // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and // offset if doing so is legal. The only cases in which this transformation is illegal are if the load // begins before the local or if the load extends beyond the end of the local (i.e. if the load is // out-of-bounds w.r.t. the local). if ((temp != nullptr) && !foldAndReturnTemp) { assert(temp->OperIsLocal()); const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lclNum); const var_types tempTyp = temp->TypeGet(); const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK); const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp); // Make sure we do not enregister this lclVar. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); // If the size of the load is greater than the size of the lclVar, we cannot fold this access into // a lclFld: the access represented by an lclFld node must begin at or after the start of the // lclVar and must not extend beyond the end of the lclVar. if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize)) { GenTreeLclFld* lclFld; // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival' // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival' // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type. // if (temp->OperGet() == GT_LCL_FLD) { lclFld = temp->AsLclFld(); lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1)); lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq)); } else // We have a GT_LCL_VAR. 
{ assert(temp->OperGet() == GT_LCL_VAR); temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField". lclFld = temp->AsLclFld(); lclFld->SetLclOffs(static_cast<unsigned>(ival1)); if (fieldSeq != nullptr) { // If it does represent a field, note that. lclFld->SetFieldSeq(fieldSeq); } } temp->gtType = tree->gtType; foldAndReturnTemp = true; } } if (foldAndReturnTemp) { assert(temp != nullptr); assert(temp->TypeGet() == typ); assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR)); // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for // 'temp' because a GT_ADDR always marks it for its operand. temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE); if (op1->OperGet() == GT_ADD) { DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT } DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR DEBUG_DESTROY_NODE(tree); // GT_IND // If the result of the fold is a local var, we may need to perform further adjustments e.g. for // normalization. if (temp->OperIs(GT_LCL_VAR)) { #ifdef DEBUG // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear // and the node in question must have this bit set (as it has already been morphed). temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG const bool forceRemorph = true; temp = fgMorphLocalVar(temp, forceRemorph); #ifdef DEBUG // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function // returns. temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return temp; } // Only do this optimization when we are in the global optimizer. Doing this after value numbering // could result in an invalid value number for the newly generated GT_IND node. if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph) { // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)). // TBD: this transformation is currently necessary for correctness -- it might // be good to analyze the failures that result if we don't do this, and fix them // in other ways. Ideally, this should be optional. GenTree* commaNode = op1; GenTreeFlags treeFlags = tree->gtFlags; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS at // least. #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA) { commaNode = commaNode->AsOp()->gtOp2; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at // least. commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) & (GTF_ASG | GTF_CALL)); #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0; ArrayInfo arrInfo; if (wasArrIndex) { bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); assert(b); GetArrayInfoMap()->Remove(tree); } tree = op1; GenTree* addr = commaNode->AsOp()->gtOp2; // TODO-1stClassStructs: we often create a struct IND without a handle, fix it. 
op1 = gtNewIndir(typ, addr); // This is very conservative op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING; op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); if (wasArrIndex) { GetArrayInfoMap()->Set(op1, arrInfo); } #ifdef DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif commaNode->AsOp()->gtOp2 = op1; commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); return tree; } break; } case GT_ADDR: // Can not remove op1 if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } if (op1->OperGet() == GT_IND) { if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(IND(...)) == (...). GenTree* addr = op1->AsOp()->gtOp1; // If tree has a zero field sequence annotation, update the annotation // on addr node. FieldSeqNode* zeroFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq)) { fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq); } noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } } else if (op1->OperGet() == GT_OBJ) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(OBJ(...)) == (...). GenTree* addr = op1->AsObj()->Addr(); noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase) { // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)). // (Be sure to mark "z" as an l-value...) ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack)); for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2()) { commas.Push(comma); } GenTree* commaNode = commas.Top(); // The top-level addr might be annotated with a zeroOffset field. FieldSeqNode* zeroFieldSeq = nullptr; bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq); tree = op1; commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE; // If the node we're about to put under a GT_ADDR is an indirection, it // doesn't need to be materialized, since we only want the addressing mode. Because // of this, this GT_IND is not a faulting indirection and we don't have to extract it // as a side effect. GenTree* commaOp2 = commaNode->AsOp()->gtOp2; if (commaOp2->OperIsBlk()) { commaOp2->SetOper(GT_IND); } if (commaOp2->gtOper == GT_IND) { commaOp2->gtFlags |= GTF_IND_NONFAULTING; commaOp2->gtFlags &= ~GTF_EXCEPT; commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT); } op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2); if (isZeroOffset) { // Transfer the annotation to the new GT_ADDR node. fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq); } commaNode->AsOp()->gtOp2 = op1; // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform // might give op1 a type different from byref (like, say, native int). So now go back and give // all the comma nodes the type of op1. // TODO: the comma flag update below is conservative and can be improved. // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to // get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF). 
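                // As an illustration: ADDR(COMMA(call(), IND(p))) becomes
                // COMMA(call(), ADDR(IND(p))), and the inner ADDR(IND(p)) can later
                // collapse to just "p" via the ADDR(IND(x)) == x transform above.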
while (!commas.Empty()) { GenTree* comma = commas.Pop(); comma->gtType = op1->gtType; comma->gtFlags |= op1->gtFlags; #ifdef DEBUG comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif gtUpdateNodeSideEffects(comma); } return tree; } break; case GT_COLON: if (fgGlobalMorph) { /* Mark the nodes that are conditionally executed */ fgWalkTreePre(&tree, gtMarkColonCond); } /* Since we're doing this postorder we clear this if it got set by a child */ fgRemoveRestOfBlock = false; break; case GT_COMMA: /* Special case: trees that don't produce a value */ if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2)) { typ = tree->gtType = TYP_VOID; } // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. // if (!optValnumCSE_phase) { // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this // is all we need. GenTree* op1SideEffects = nullptr; // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example) // hoisted expressions in loops. gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE)); if (op1SideEffects) { // Replace the left hand side with the side effect list. op1 = op1SideEffects; tree->AsOp()->gtOp1 = op1SideEffects; gtUpdateNodeSideEffects(tree); } else { op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op2; } // If the right operand is just a void nop node, throw it away. Unless this is a // comma throw, in which case we want the top-level morphing loop to recognize it. if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree)) { op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op2); return op1; } } break; case GT_JTRUE: /* Special case if fgRemoveRestOfBlock is set to true */ if (fgRemoveRestOfBlock) { if (fgIsCommaThrow(op1, true)) { GenTree* throwNode = op1->AsOp()->gtOp1; JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n", dspTreeID(tree)); DEBUG_DESTROY_NODE(tree); return throwNode; } noway_assert(op1->OperIsCompare()); noway_assert(op1->gtFlags & GTF_EXCEPT); // We need to keep op1 for the side-effects. Hang it off // a GT_COMMA node JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree)); tree->ChangeOper(GT_COMMA); tree->AsOp()->gtOp2 = op2 = gtNewNothingNode(); // Additionally since we're eliminating the JTRUE // codegen won't like it if op1 is a RELOP of longs, floats or doubles. // So we change it into a GT_COMMA as well. 
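                // For example (illustrative): JTRUE(GT(a, b)) becomes
                // COMMA(COMMA(a, b), NOP); the operands are kept for their side
                // effects, but no relop result or branch is produced.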
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1)); op1->ChangeOper(GT_COMMA); op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop op1->gtType = op1->AsOp()->gtOp1->gtType; return tree; } break; case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant) { // Should be expanded by the time it reaches CSE phase assert(!optValnumCSE_phase); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to "); if (op1->OperIsConst()) { // We're lucky to catch a constant here while importer was not JITDUMP("true\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(1); } else { GenTree* op1SideEffects = nullptr; gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT); if (op1SideEffects != nullptr) { DEBUG_DESTROY_NODE(tree); // Keep side-effects of op1 tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0)); JITDUMP("false with side effects:\n") DISPTREE(tree); } else { JITDUMP("false\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(0); } } INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } break; default: break; } assert(oper == tree->gtOper); // Propagate comma throws. // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON)) { if ((op1 != nullptr) && fgIsCommaThrow(op1, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY); if (propagatedThrow != nullptr) { return propagatedThrow; } } if ((op2 != nullptr) && fgIsCommaThrow(op2, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT); if (propagatedThrow != nullptr) { return propagatedThrow; } } } /*------------------------------------------------------------------------- * Optional morphing is done if tree transformations is permitted */ if ((opts.compFlags & CLFLG_TREETRANS) == 0) { return tree; } tree = fgMorphSmpOpOptional(tree->AsOp()); return tree; } //------------------------------------------------------------------------ // fgOptimizeCast: Optimizes the supplied GT_CAST tree. // // Tries to get rid of the cast, its operand, the GTF_OVERFLOW flag, calls // calls "optNarrowTree". Called in post-order by "fgMorphSmpOp". // // Arguments: // tree - the cast tree to optimize // // Return Value: // The optimized tree (that can have any shape). // GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) { GenTree* src = cast->CastOp(); if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src)) { return cast; } // See if we can discard the cast. if (varTypeIsIntegral(cast) && varTypeIsIntegral(src)) { IntegralRange srcRange = IntegralRange::ForNode(src, this); IntegralRange noOvfRange = IntegralRange::ForCastInput(cast); if (noOvfRange.Contains(srcRange)) { // Casting between same-sized types is a no-op, // given we have proven this cast cannot overflow. if (genActualType(cast) == genActualType(src)) { return src; } cast->ClearOverflow(); cast->SetAllEffectsFlags(src); // Try and see if we can make this cast into a cheaper zero-extending version. if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive()) { cast->SetUnsigned(); } } // For checked casts, we're done. if (cast->gtOverflow()) { return cast; } var_types castToType = cast->CastToType(); // For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast. 
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) && src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD)) { // We're changing the type here so we need to update the VN; // in other cases we discard the cast without modifying src // so the VN doesn't change. src->ChangeType(castToType); src->SetVNsFromNode(cast); return src; } // Try to narrow the operand of the cast and discard the cast. if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) && optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false)) { optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true); // "optNarrowTree" may leave a dead cast behind. if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp()))) { src = src->AsCast()->CastOp(); } return src; } // Check for two consecutive casts, we may be able to discard the intermediate one. if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow()) { var_types dstCastToType = castToType; var_types srcCastToType = src->AsCast()->CastToType(); // CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X). // CAST(ushort <- CAST(short <- X)): CAST(ushort <- X). if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType))) { cast->CastOp() = src->AsCast()->CastOp(); DEBUG_DESTROY_NODE(src); } } } return cast; } //------------------------------------------------------------------------ // fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns. // // Arguments: // cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant // // Return Value: // The optimized tree, "cmp" in case no optimizations were done. // Currently only returns relop trees. // GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_EQ, GT_NE)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); // Check for "(expr +/- icon1) ==/!= (non-zero-icon2)". if (op2->IsCnsIntOrI() && (op2->IconValue() != 0)) { // Since this can occur repeatedly we use a while loop. while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Got it; change "x + icon1 == icon2" to "x == icon2 - icon1". ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue(); ssize_t op2Value = op2->IconValue(); if (op1->OperIs(GT_ADD)) { op2Value -= op1Value; } else { op2Value += op1Value; } op1 = op1->AsOp()->gtGetOp1(); op2->SetIconValue(static_cast<int32_t>(op2Value)); } cmp->gtOp1 = op1; fgUpdateConstTreeValueNumber(op2); } // Here we look for the following tree // // EQ/NE // / \. // op1 CNS 0/1 // if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1)) { ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue()); if (op1->OperIsCompare()) { // Here we look for the following tree // // EQ/NE -> RELOP/!RELOP // / \ / \. // RELOP CNS 0/1 // / \. // // Note that we will remove/destroy the EQ/NE node and move // the RELOP up into it's location. // Here we reverse the RELOP if necessary. 
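            // For example (illustrative): "(x > y) == 0" is reversed to "x <= y",
            // while "(x > y) == 1" simply becomes "x > y".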
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ))); if (reverse) { gtReverseCond(op1); } noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0); op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE); op1->SetVNsFromNode(cmp); DEBUG_DESTROY_NODE(cmp); return op1; } // // Now we check for a compare with the result of an '&' operator // // Here we look for the following transformation: // // EQ/NE EQ/NE // / \ / \. // AND CNS 0/1 -> AND CNS 0 // / \ / \. // RSZ/RSH CNS 1 x CNS (1 << y) // / \. // x CNS_INT +y if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH)) { GenTreeOp* andOp = op1->AsOp(); GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp(); if (!rshiftOp->gtGetOp2()->IsCnsIntOrI()) { goto SKIP; } ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue(); if (shiftAmount < 0) { goto SKIP; } if (!andOp->gtGetOp2()->IsIntegralConst(1)) { goto SKIP; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if (andOp->TypeIs(TYP_INT)) { if (shiftAmount > 31) { goto SKIP; } andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount)); // Reverse the condition if necessary. if (op2Value == 1) { gtReverseCond(cmp); op2->SetIconValue(0); } } else if (andOp->TypeIs(TYP_LONG)) { if (shiftAmount > 63) { goto SKIP; } andMask->SetLngValue(1ll << shiftAmount); // Reverse the cond if necessary if (op2Value == 1) { gtReverseCond(cmp); op2->SetLngValue(0); } } andOp->gtOp1 = rshiftOp->gtGetOp1(); DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2()); DEBUG_DESTROY_NODE(rshiftOp); } } SKIP: // Now check for compares with small constant longs that can be cast to int. // Note that we filter out negative values here so that the transformations // below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were // we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs. if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0)) { return cmp; } if (!op1->OperIs(GT_AND)) { // Another interesting case: cast from int. if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Simply make this into an integer comparison. cmp->gtOp1 = op1->AsCast()->CastOp(); op2->BashToConst(static_cast<int32_t>(op2->LngValue())); fgUpdateConstTreeValueNumber(op2); } return cmp; } // Now we perform the following optimization: // EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) => // EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT) // when the constants are sufficiently small. // This transform cannot preserve VNs. if (fgGlobalMorph) { assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND)); // Is the result of the mask effectively an INT? GenTreeOp* andOp = op1->AsOp(); if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG)) { return cmp; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if ((andMask->LngValue() >> 32) != 0) { return cmp; } // Now we narrow the first operand of AND to int. if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false)) { optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true); } else { andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT); } assert(andMask == andOp->gtGetOp2()); // Now replace the mask node. andMask->BashToConst(static_cast<int32_t>(andMask->LngValue())); // Now change the type of the AND node. andOp->ChangeType(TYP_INT); // Finally we replace the comparand. 
op2->BashToConst(static_cast<int32_t>(op2->LngValue())); } return cmp; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation. // // Recognizes comparisons against various constant operands and morphs // them, if possible, into comparisons against zero. // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // The "cmp" tree, possibly with a modified oper. // The second operand's constant value may be modified as well. // // Assumptions: // The operands have been swapped so that any constants are on the right. // The second operand is an integral constant. // GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2())); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); assert(genActualType(op1) == genActualType(op2)); genTreeOps oper = cmp->OperGet(); int64_t op2Value = op2->IntegralValue(); if (op2Value == 1) { // Check for "expr >= 1". if (oper == GT_GE) { // Change to "expr != 0" for unsigned and "expr > 0" for signed. oper = cmp->IsUnsigned() ? GT_NE : GT_GT; } // Check for "expr < 1". else if (oper == GT_LT) { // Change to "expr == 0" for unsigned and "expr <= 0" for signed. oper = cmp->IsUnsigned() ? GT_EQ : GT_LE; } } // Check for "expr relop -1". else if (!cmp->IsUnsigned() && (op2Value == -1)) { // Check for "expr <= -1". if (oper == GT_LE) { // Change to "expr < 0". oper = GT_LT; } // Check for "expr > -1". else if (oper == GT_GT) { // Change to "expr >= 0". oper = GT_GE; } } else if (cmp->IsUnsigned()) { if ((oper == GT_LE) || (oper == GT_GT)) { if (op2Value == 0) { // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT // recognizes certain patterns that involve GT_NE (e.g. (x & 4) != 0) and fails // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0) // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare; it sometimes // occurs as a result of branch inversion. oper = (oper == GT_LE) ? GT_EQ : GT_NE; cmp->gtFlags &= ~GTF_UNSIGNED; } // LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0). else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) || ((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX))) { oper = (oper == GT_LE) ? GT_GE : GT_LT; cmp->gtFlags &= ~GTF_UNSIGNED; } } } if (!cmp->OperIs(oper)) { // Keep the old ValueNumber for 'tree' as the new expr // will still compute the same value as before. cmp->SetOper(oper, GenTree::PRESERVE_VN); op2->SetIntegralValue(0); fgUpdateConstTreeValueNumber(op2); } return cmp; } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // fgOptimizeHWIntrinsic: optimize a HW intrinsic node // // Arguments: // node - HWIntrinsic node to examine // // Returns: // The original node if no optimization happened or if tree bashing occurred. // An alternative tree if an optimization happened. // // Notes: // Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create, // and if the call is one of these, attempt to optimize. // This is post-order, meaning that it will not morph the children.
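// For example, a Vector128_Create node whose operands are all constant zeros is
// retargeted to Vector128_get_Zero.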
// GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { assert(!optValnumCSE_phase); if (opts.OptimizationDisabled()) { return node; } switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARM64) case NI_Vector64_Create: #endif { bool hwAllArgsAreConstZero = true; for (GenTree* arg : node->Operands()) { if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero()) { hwAllArgsAreConstZero = false; break; } } if (hwAllArgsAreConstZero) { switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: { node->ResetHWIntrinsicId(NI_Vector128_get_Zero); break; } #if defined(TARGET_XARCH) case NI_Vector256_Create: { node->ResetHWIntrinsicId(NI_Vector256_get_Zero); break; } #elif defined(TARGET_ARM64) case NI_Vector64_Create: { node->ResetHWIntrinsicId(NI_Vector64_get_Zero); break; } #endif default: unreached(); } } break; } default: break; } return node; } #endif //------------------------------------------------------------------------ // fgOptimizeCommutativeArithmetic: Optimizes commutative operations. // // Arguments: // tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize. // // Return Value: // The optimized tree that can have any shape. // GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree) { assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND)); assert(!tree->gtOverflowEx()); // Commute constants to the right. if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF)) { // TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))". // This may indicate a missed "remorph". Task is to re-enable this assertion and investigate. std::swap(tree->gtOp1, tree->gtOp2); } if (fgOperIsBitwiseRotationRoot(tree->OperGet())) { GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree); if (rotationTree != nullptr) { return rotationTree; } } if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR)) { GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp()); if (castTree != nullptr) { return castTree; } } if (varTypeIsIntegralOrI(tree)) { genTreeOps oldTreeOper = tree->OperGet(); GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp()); if (optimizedTree != nullptr) { if (!optimizedTree->OperIs(oldTreeOper)) { // "optimizedTree" could end up being a COMMA. return optimizedTree; } tree = optimizedTree; } } if (!optValnumCSE_phase) { GenTree* optimizedTree = nullptr; if (tree->OperIs(GT_ADD)) { optimizedTree = fgOptimizeAddition(tree); } else if (tree->OperIs(GT_MUL)) { optimizedTree = fgOptimizeMultiply(tree); } else if (tree->OperIs(GT_AND)) { optimizedTree = fgOptimizeBitwiseAnd(tree); } if (optimizedTree != nullptr) { return optimizedTree; } } return tree; } //------------------------------------------------------------------------ // fgOptimizeAddition: optimizes addition. // // Arguments: // add - the unchecked GT_ADD tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add) { assert(add->OperIs(GT_ADD) && !add->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = add->gtGetOp1(); GenTree* op2 = add->gtGetOp2(); // Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))". // Be careful not to create a byref pointer that may point outside of the ref object. 
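// E.g. "(x + 4) + (y + 8)" becomes "(x + y) + 12".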
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2". if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() && !varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph) { GenTreeOp* addOne = op1->AsOp(); GenTreeOp* addTwo = op2->AsOp(); GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon(); GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon(); addOne->gtOp2 = addTwo->gtGetOp1(); addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2()); DEBUG_DESTROY_NODE(addTwo); constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue()); op2 = constOne; add->gtOp2 = constOne; DEBUG_DESTROY_NODE(constTwo); } // Fold (x + 0) - given it won't change the tree type to TYP_REF. // TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)". if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF))) { if (op2->IsCnsIntOrI() && varTypeIsI(op1)) { fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq); } DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(add); return op1; } // Note that these transformations are legal for floating-point ADDs as well. if (opts.OptimizationEnabled()) { // -a + b => b - a // ADD(NEG(a), b) => SUB(b, a) // Do not do this if "op2" is constant for canonicalization purposes. if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2)) { add->SetOper(GT_SUB); add->gtOp1 = op2; add->gtOp2 = op1->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op1); return add; } // a + -b => a - b // ADD(a, NEG(b)) => SUB(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { add->SetOper(GT_SUB); add->gtOp2 = op2->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op2); return add; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeMultiply: optimizes multiplication. // // Arguments: // mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul)); assert(!mul->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); assert(mul->TypeGet() == genActualType(op1)); assert(mul->TypeGet() == genActualType(op2)); if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl()) { double multiplierValue = op2->AsDblCon()->gtDconVal; if (multiplierValue == 1.0) { // Fold "x * 1.0" to "x". DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Fold "x * 2.0" to "x + x". // If op1 is not a local we will have to introduce a temporary via GT_COMMA. // Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do // this for locals / after hoisting has run (when rationalization remorphs // math INTRINSICs into calls...).
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear))) { op2 = fgMakeMultiUse(&op1); GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2); INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return add; } } if (op2->IsIntegralConst()) { ssize_t mult = op2->AsIntConCommon()->IconValue(); bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq(); assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr); if (mult == 0) { // We may be able to throw away op1 (unless it has side-effects) if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0) { DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(mul); return op2; // Just return the "0" node } // We need to keep op1 for the side-effects. Hang it off a GT_COMMA node. mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN); return mul; } #ifdef TARGET_XARCH // Should we try to replace integer multiplication with lea/add/shift sequences? bool mulShiftOpt = compCodeOpt() != SMALL_CODE; #else // !TARGET_XARCH bool mulShiftOpt = false; #endif // !TARGET_XARCH size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; // is it a power of two? (positive or negative) if (abs_mult == lowestBit) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } // If "op2" is a constant array index, the other multiplicand must be a constant. // Transfer the annotation to the other one. if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(op2->AsIntCon()->gtFieldSeq->m_next == nullptr); GenTree* otherOp = op1; if (otherOp->OperGet() == GT_NEG) { otherOp = otherOp->AsOp()->gtOp1; } assert(otherOp->OperGet() == GT_CNS_INT); assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField()); otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq; } if (abs_mult == 1) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Change the multiplication into a shift by log2(val) bits. op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; if (factor == 3 || factor == 5 || factor == 9) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet()); if (op2IsConstIndex) { factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); } // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon); mul->gtOp1 = op1; fgMorphTreeDone(op1); op2->AsIntConCommon()->SetIconValue(shift); changeToShift = true; } } if (changeToShift) { fgUpdateConstTreeValueNumber(op2); mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN); return mul; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeBitwiseAnd: optimizes the "and" operation. // // Arguments: // andOp - the GT_AND tree to optimize. 
// // Return Value: // The optimized tree, currently always a relop, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp) { assert(andOp->OperIs(GT_AND)); assert(!optValnumCSE_phase); GenTree* op1 = andOp->gtGetOp1(); GenTree* op2 = andOp->gtGetOp2(); // Fold "cmp & 1" to just "cmp". if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(andOp); return op1; } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against // various cast operands and tries to remove them. E.g.: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CNS_INT long // // to: // // * GE_un int // +--* X int // \--* CNS_INT int // // same for: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CAST long <- [u]long <- int // \--* ARR_LEN int // // These patterns quite often show up along with index checks // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // Returns the same tree where operands might have narrower types // // Notes: // TODO-Casts: consider unifying this function with "optNarrowTree" // GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // Caller is expected to call this function only if we have CAST nodes assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)); if (!op1->TypeIs(TYP_LONG)) { // We can extend this logic to handle small types as well, but currently it's done mostly to // assist range check elimination return cmp; } GenTree* castOp; GenTree* knownPositiveOp; bool knownPositiveIsOp2; if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)))) { // op2 is either a LONG constant or (T)ARR_LENGTH knownPositiveIsOp2 = true; castOp = cmp->gtGetOp1(); knownPositiveOp = cmp->gtGetOp2(); } else { // op1 is either a LONG constant (yes, it's pretty normal for relops) // or (T)ARR_LENGTH castOp = cmp->gtGetOp2(); knownPositiveOp = cmp->gtGetOp1(); knownPositiveIsOp2 = false; } if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) && castOp->IsUnsigned() && !castOp->gtOverflow()) { bool knownPositiveFitsIntoU32 = false; if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue())) { // BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX. knownPositiveFitsIntoU32 = true; } else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) && knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)) { knownPositiveFitsIntoU32 = true; // TODO-Casts: recognize Span.Length here as well. 
} if (!knownPositiveFitsIntoU32) { return cmp; } JITDUMP("Removing redundant cast(s) for:\n") DISPTREE(cmp) JITDUMP("\n\nto:\n\n") cmp->SetUnsigned(); // Drop cast from castOp if (knownPositiveIsOp2) { cmp->gtOp1 = castOp->AsCast()->CastOp(); } else { cmp->gtOp2 = castOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(castOp); if (knownPositiveOp->OperIs(GT_CAST)) { // Drop cast from knownPositiveOp too if (knownPositiveIsOp2) { cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp(); } else { cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(knownPositiveOp); } else { // Change type for constant from LONG to INT knownPositiveOp->ChangeType(TYP_INT); #ifndef TARGET_64BIT assert(knownPositiveOp->OperIs(GT_CNS_LNG)); knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue())); #endif fgUpdateConstTreeValueNumber(knownPositiveOp); } DISPTREE(cmp) JITDUMP("\n") } return cmp; } //------------------------------------------------------------------------ // fgPropagateCommaThrow: propagate a "comma throw" up the tree. // // "Comma throws" in the compiler represent the canonical form of an always // throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy // the semantic that the original expression produced some value and are // generated by "gtFoldExprConst" when it encounters checked arithmetic that // will determinably overflow. // // In the global morphing phase, "comma throws" are "propagated" up the tree, // in post-order, to eliminate nodes that will never execute. This method, // called by "fgMorphSmpOp", encapsulates this optimization. // // Arguments: // parent - the node currently being processed. // commaThrow - the comma throw in question, "parent"'s operand. // precedingSideEffects - side effects of nodes preceding "comma" in execution order. // // Return Value: // If "parent" is to be replaced with a comma throw, i. e. the propagation was successful, // the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception: // the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not // have to be a "comma throw", it can be "bare" throw call if the "parent" node did not // produce any value. // // Notes: // "Comma throws" are very rare. // GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects) { // Comma throw propagation does not preserve VNs, and deletes nodes. assert(fgGlobalMorph); assert(fgIsCommaThrow(commaThrow)); if ((commaThrow->gtFlags & GTF_COLON_COND) == 0) { fgRemoveRestOfBlock = true; } if ((precedingSideEffects & GTF_ALL_EFFECT) == 0) { if (parent->TypeIs(TYP_VOID)) { // Return the throw node as the new tree. return commaThrow->gtGetOp1(); } // Fix up the COMMA's type if needed. if (genActualType(parent) != genActualType(commaThrow)) { commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent)); commaThrow->ChangeType(genActualType(parent)); } return commaThrow; } return nullptr; } //---------------------------------------------------------------------------------------------- // fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree. // // Arguments: // node - The return node that uses an indirection. // // Return Value: // the original op1 of the ret if there was no optimization or an optimized new op1. 
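// For example, RETURN(OBJ(ADDR(LCL_VAR))) can become RETURN(LCL_VAR) when the sizes
// match, which keeps the local address-free and eligible for enregistration.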
// GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ)); GenTreeIndir* ind = ret->gtGetOp1()->AsIndir(); GenTree* addr = ind->Addr(); if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR)) { // If struct promotion was undone, adjust the annotations if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr)) { return ind; } // If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that // LclVar. // Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))). GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar(); if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum())) { assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind)); unsigned indSize; if (ind->OperIs(GT_IND)) { indSize = genTypeSize(ind); } else { indSize = ind->AsBlk()->GetLayout()->GetSize(); } LclVarDsc* varDsc = lvaGetDesc(lclVar); unsigned lclVarSize; if (!lclVar->TypeIs(TYP_STRUCT)) { lclVarSize = genTypeSize(varDsc->TypeGet()); } else { lclVarSize = varDsc->lvExactSize; } // TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST // int<-SIMD16` etc. assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister); #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for // gtNewTempAssign`. if (canFold && (genReturnBB == nullptr)) { // Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it. // Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken // and enregister it. DEBUG_DESTROY_NODE(ind); DEBUG_DESTROY_NODE(addr); ret->gtOp1 = lclVar; // We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can // get rid of it now since the GT_RETURN node should never have // its address taken. assert((ret->gtFlags & GTF_DONT_CSE) == 0); lclVar->gtFlags &= ~GTF_DONT_CSE; return lclVar; } else if (!varDsc->lvDoNotEnregister) { lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } } } return ind; } #ifdef _PREFAST_ #pragma warning(pop) #endif GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { genTreeOps oper = tree->gtOper; GenTree* op1 = tree->gtOp1; GenTree* op2 = tree->gtOp2; var_types typ = tree->TypeGet(); if (fgGlobalMorph && GenTree::OperIsCommutative(oper)) { /* Swap the operands so that the more expensive one is 'op1' */ if (tree->gtFlags & GTF_REVERSE_OPS) { tree->gtOp1 = op2; tree->gtOp2 = op1; op2 = op1; op1 = tree->gtOp1; tree->gtFlags &= ~GTF_REVERSE_OPS; } if (oper == op2->gtOper) { /* Reorder nested operators at the same precedence level to be left-recursive. For example, change "(a+(b+c))" to the equivalent expression "((a+b)+c)". 
*/ /* Things are handled differently for floating-point operators */ if (!varTypeIsFloating(tree->TypeGet())) { fgMoveOpsLeft(tree); op1 = tree->gtOp1; op2 = tree->gtOp2; } } } #if REARRANGE_ADDS /* Change "((x+icon)+y)" to "((x+y)+icon)" Don't reorder floating-point operations */ if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() && varTypeIsIntegralOrI(typ)) { GenTree* ad1 = op1->AsOp()->gtOp1; GenTree* ad2 = op1->AsOp()->gtOp2; if (!op2->OperIsConst() && ad2->OperIsConst()) { // This takes // + (tree) // / \. // / \. // / \. // + (op1) op2 // / \. // / \. // ad1 ad2 // // and it swaps ad2 and op2. // Don't create a byref pointer that may point outside of the ref object. // If a GC happens, the byref won't get updated. This can happen if one // of the int components is negative. It also requires the address generation // be in a fully-interruptible code region. if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet())) { tree->gtOp2 = ad2; op1->AsOp()->gtOp2 = op2; op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; op2 = tree->gtOp2; } } } #endif /*------------------------------------------------------------------------- * Perform optional oper-specific postorder morphing */ switch (oper) { case GT_ASG: // Make sure we're allowed to do this. if (optValnumCSE_phase) { // It is not safe to reorder/delete CSE's break; } if (varTypeIsStruct(typ) && !tree->IsPhiDefn()) { if (tree->OperIsCopyBlkOp()) { return fgMorphCopyBlock(tree); } else { return fgMorphInitBlock(tree); } } if (typ == TYP_LONG) { break; } if (op2->gtFlags & GTF_ASG) { break; } if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT)) { break; } /* Special case: a cast that can be thrown away */ // TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only // one cast and sometimes there is another one after it that gets removed by this // code. fgMorphSmp should be improved to remove all redundant casts so this code // can be removed. 
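// E.g. in ASG(IND(short), CAST(int <- y)) where "y" is a short, the widening cast is
// redundant: the two-byte store only keeps the low bits, so "y" can be stored directly.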
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow()) { var_types srct; var_types cast; var_types dstt; srct = op2->AsCast()->CastOp()->TypeGet(); cast = (var_types)op2->CastToType(); dstt = op1->TypeGet(); /* Make sure these are all ints and precision is not lost */ if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT) { op2 = tree->gtOp2 = op2->AsCast()->CastOp(); } } break; case GT_MUL: /* Check for the case "(val + icon) * icon" */ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD) { GenTree* add = op1->AsOp()->gtOp2; if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0)) { if (tree->gtOverflow() || op1->gtOverflow()) { break; } ssize_t imul = op2->AsIntCon()->gtIconVal; ssize_t iadd = add->AsIntCon()->gtIconVal; /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */ oper = GT_ADD; tree->ChangeOper(oper); op2->AsIntCon()->SetValueTruncating(iadd * imul); op1->ChangeOper(GT_MUL); add->AsIntCon()->SetIconValue(imul); } } break; case GT_DIV: /* For "val / 1", just return "val" */ if (op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(tree); return op1; } break; case GT_UDIV: case GT_UMOD: tree->CheckDivideByConstOptimized(this); break; case GT_LSH: /* Check for the case "(val + icon) << icon" */ if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow()) { GenTree* cns = op1->AsOp()->gtOp2; if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0)) { ssize_t ishf = op2->AsIntConCommon()->IconValue(); ssize_t iadd = cns->AsIntConCommon()->IconValue(); // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n"); /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */ tree->ChangeOper(GT_ADD); // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; op2->AsIntConCommon()->SetValueTruncating(iadd << ishf); if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr && cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(cns->AsIntCon()->gtFieldSeq->m_next == nullptr); op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq; } op1->ChangeOper(GT_LSH); cns->AsIntConCommon()->SetIconValue(ishf); } } break; case GT_XOR: if (!optValnumCSE_phase) { /* "x ^ -1" is "~x" */ if (op2->IsIntegralConst(-1)) { tree->ChangeOper(GT_NOT); tree->gtOp2 = nullptr; DEBUG_DESTROY_NODE(op2); } else if (op2->IsIntegralConst(1) && op1->OperIsCompare()) { /* "binaryVal ^ 1" is "!binaryVal" */ gtReverseCond(op1); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); return op1; } } break; case GT_INIT_VAL: // Initialization values for initBlk have special semantics - their lower // byte is used to fill the struct. However, we allow 0 as a "bare" value, // which enables them to get a VNForZero, and be propagated. if (op1->IsIntegralConst(0)) { return op1; } break; default: break; } return tree; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree. // // Arguments: // multiOp - The tree to morph // // Return Value: // The fully morphed tree. 
// GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) { gtUpdateNodeOperSideEffects(multiOp); bool dontCseConstArguments = false; #if defined(FEATURE_HW_INTRINSICS) // Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments if (multiOp->OperIs(GT_HWINTRINSIC)) { NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId(); #if defined(TARGET_XARCH) if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM) { dontCseConstArguments = true; } #elif defined(TARGET_ARMARCH) if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic)) { dontCseConstArguments = true; } #endif } #endif for (GenTree** use : multiOp->UseEdges()) { *use = fgMorphTree(*use); GenTree* operand = *use; multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT); if (dontCseConstArguments && operand->OperIsConst()) { operand->SetDoNotCSE(); } // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. // if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted) { lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum() DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep)); } } #if defined(FEATURE_HW_INTRINSICS) if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC)) { GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic(); switch (hw->GetHWIntrinsicId()) { #if defined(TARGET_XARCH) case NI_SSE_Xor: case NI_SSE2_Xor: case NI_AVX_Xor: case NI_AVX2_Xor: { // Transform XOR(X, 0) to X for vectors GenTree* op1 = hw->Op(1); GenTree* op2 = hw->Op(2); if (!gtIsActiveCSE_Candidate(hw)) { if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op1); return op2; } if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op2); return op1; } } break; } #endif case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARMARCH) case NI_Vector64_Create: #endif { bool hwAllArgsAreConst = true; for (GenTree** use : multiOp->UseEdges()) { if (!(*use)->OperIsConst()) { hwAllArgsAreConst = false; break; } } // Avoid unexpected CSE for constant arguments for Vector_.Create // but only if all arguments are constants. if (hwAllArgsAreConst) { for (GenTree** use : multiOp->UseEdges()) { (*use)->SetDoNotCSE(); } } } break; default: break; } } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) #ifdef FEATURE_HW_INTRINSICS if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase) { return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic()); } #endif return multiOp; } #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b // (see ECMA III 3.55 and III.3.56). // // Arguments: // tree - The GT_MOD/GT_UMOD tree to morph // // Returns: // The morphed tree // // Notes: // For ARM64 we don't have a remainder instruction so this transform is // always done. For XARCH this transform is done if we know that magic // division will be used, in that case this transform allows CSE to // eliminate the redundant div from code like "x = a / 3; y = a % 3;". 
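// The resulting shape is SUB(a, MUL(DIV(a, b), b)), built with multi-use copies of
// "a" and "b" so that each value is only evaluated once.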
// GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree) { JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree)); if (tree->OperGet() == GT_MOD) { tree->SetOper(GT_DIV); } else if (tree->OperGet() == GT_UMOD) { tree->SetOper(GT_UDIV); } else { noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv"); } var_types type = tree->gtType; GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1); GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2); GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue); GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul); // Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul". // sub->gtFlags |= GTF_REVERSE_OPS; #ifdef DEBUG sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif tree->CheckDivideByConstOptimized(this); return sub; } //------------------------------------------------------------------------------ // fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree. // // // Arguments: // oper - Operation to check // // Return Value: // True if the operation can be a root of a bitwise rotation tree; false otherwise. bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper) { return (oper == GT_OR) || (oper == GT_XOR); } //------------------------------------------------------------------------------ // fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return // an equivalent GT_ROL or GT_ROR tree; otherwise, return "nullptr". // // Arguments: // tree - tree to check for a rotation pattern // // Return Value: // An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise. // // Assumption: // The input is a GT_OR or a GT_XOR tree. GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) { // // Check for a rotation pattern, e.g., // // OR ROL // / \ / \. // LSH RSZ -> x y // / \ / \. // x AND x AND // / \ / \. // y 31 ADD 31 // / \. // NEG 32 // | // y // The patterns recognized: // (x << (y & M)) op (x >>> ((-y + N) & M)) // (x >>> ((-y + N) & M)) op (x << (y & M)) // // (x << y) op (x >>> (-y + N)) // (x >>> (-y + N)) op (x << y) // // (x >>> (y & M)) op (x << ((-y + N) & M)) // (x << ((-y + N) & M)) op (x >>> (y & M)) // // (x >>> y) op (x << (-y + N)) // (x << (-y + N)) op (x >>> y) // // (x << c1) op (x >>> c2) // (x >>> c1) op (x << c2) // // where // c1 and c2 are const // c1 + c2 == bitsize(x) // N == bitsize(x) // M is const // M & (N - 1) == N - 1 // op is either | or ^ if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)) { // We can't do anything if the tree has assignments, calls, or volatile // reads. Note that we allow GTF_EXCEPT side effect since any exceptions // thrown by the original tree will be thrown by the transformed tree as well. return nullptr; } genTreeOps oper = tree->OperGet(); assert(fgOperIsBitwiseRotationRoot(oper)); // Check if we have an LSH on one side of the OR and an RSZ on the other side.
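// E.g. "(x << 3) | (x >>> 29)" for a 32 bit "x" matches the constant pattern above
// and becomes ROL(x, 3).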
GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); GenTree* leftShiftTree = nullptr; GenTree* rightShiftTree = nullptr; if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ)) { leftShiftTree = op1; rightShiftTree = op2; } else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH)) { leftShiftTree = op2; rightShiftTree = op1; } else { return nullptr; } // Check if the trees representing the value to shift are identical. // We already checked that there are no side effects above. if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1())) { GenTree* rotatedValue = leftShiftTree->gtGetOp1(); var_types rotatedValueActualType = genActualType(rotatedValue->gtType); ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8; noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64)); GenTree* leftShiftIndex = leftShiftTree->gtGetOp2(); GenTree* rightShiftIndex = rightShiftTree->gtGetOp2(); // The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits // shouldn't be masked for the transformation to be valid. If additional // higher bits are not masked, the transformation is still valid since the result // of MSIL shift instructions is unspecified if the shift amount is greater or equal // than the width of the value being shifted. ssize_t minimalMask = rotatedValueBitSize - 1; ssize_t leftShiftMask = -1; ssize_t rightShiftMask = -1; if ((leftShiftIndex->OperGet() == GT_AND)) { if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI()) { leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; leftShiftIndex = leftShiftIndex->gtGetOp1(); } else { return nullptr; } } if ((rightShiftIndex->OperGet() == GT_AND)) { if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI()) { rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; rightShiftIndex = rightShiftIndex->gtGetOp1(); } else { return nullptr; } } if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask)) { // The shift index is overmasked, e.g., we have // something like (x << y & 15) or // (x >> (32 - y) & 15 with 32 bit x. // The transformation is not valid. return nullptr; } GenTree* shiftIndexWithAdd = nullptr; GenTree* shiftIndexWithoutAdd = nullptr; genTreeOps rotateOp = GT_NONE; GenTree* rotateIndex = nullptr; if (leftShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = leftShiftIndex; shiftIndexWithoutAdd = rightShiftIndex; rotateOp = GT_ROR; } else if (rightShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = rightShiftIndex; shiftIndexWithoutAdd = leftShiftIndex; rotateOp = GT_ROL; } if (shiftIndexWithAdd != nullptr) { if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI()) { if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize) { if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG) { if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd)) { // We found one of these patterns: // (x << (y & M)) | (x >>> ((-y + N) & M)) // (x << y) | (x >>> (-y + N)) // (x >>> (y & M)) | (x << ((-y + N) & M)) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. // GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need // to add helpers for GT_ROL and GT_ROR. 
return nullptr; } #endif rotateIndex = shiftIndexWithoutAdd; } } } } else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI())) { if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize) { // We found this pattern: // (x << c1) | (x >>> c2) // where c1 and c2 are const and c1 + c2 == bitsize(x) rotateOp = GT_ROL; rotateIndex = leftShiftIndex; } } if (rotateIndex != nullptr) { noway_assert(GenTree::OperIsRotate(rotateOp)); GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT; // We can use the same tree only during global morph; reusing the tree in a later morph // may invalidate value numbers. if (fgGlobalMorph) { tree->AsOp()->gtOp1 = rotatedValue; tree->AsOp()->gtOp2 = rotateIndex; tree->ChangeOper(rotateOp); unsigned childFlags = 0; for (GenTree* op : tree->Operands()) { childFlags |= (op->gtFlags & GTF_ALL_EFFECT); } // The parent's flags should be a superset of its operands' flags noway_assert((inputTreeEffects & childFlags) == childFlags); } else { tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex); noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT)); } return tree; } } return nullptr; } #if !defined(TARGET_64BIT) //------------------------------------------------------------------------------ // fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands. // // Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap // operands if the first one is a constant and the second one is not, even for trees which // end up not being eligible for long multiplication. // // Arguments: // mul - GT_MUL tree to check for a long multiplication opportunity // // Return Value: // The original tree, with operands possibly swapped, if it is not eligible for long multiplication. // Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is. // GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(mul->TypeIs(TYP_LONG)); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // "IsValidLongMul" and decomposition do not handle constant op1. if (op1->IsIntegralConst()) { std::swap(op1, op2); mul->gtOp1 = op1; mul->gtOp2 = op2; } if (!mul->IsValidLongMul()) { return mul; } // MUL_LONG needs to do the work the casts would have done. mul->ClearUnsigned(); if (op1->IsUnsigned()) { mul->SetUnsigned(); } // "IsValidLongMul" returned "true", so this GT_MUL cannot overflow. mul->ClearOverflow(); mul->Set64RsltMul(); return fgMorphLongMul(mul); } //------------------------------------------------------------------------------ // fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT. // // Morphs *only* the operands of casts that compose the long mul to // avoid them being folded away. // // Arguments: // mul - GT_MUL tree to morph operands of // // Return Value: // The original tree, with operands morphed and flags propagated. // GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul) { INDEBUG(mul->DebugCheckLongMul()); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
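// Note: op1 is always a cast here (see "DebugCheckLongMul"), while op2 may be either
// a cast or a constant, which is why only op2 is checked below.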
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp()); op1->SetAllEffectsFlags(op1->AsCast()->CastOp()); if (op2->OperIs(GT_CAST)) { op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp()); op2->SetAllEffectsFlags(op2->AsCast()->CastOp()); } mul->SetAllEffectsFlags(op1, op2); op1->SetDoNotCSE(); op2->SetDoNotCSE(); return mul; } #endif // !defined(TARGET_64BIT) /***************************************************************************** * * Transform the given tree for code generation and return an equivalent tree. */ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) { assert(tree); #ifdef DEBUG if (verbose) { if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID) { noway_assert(!"JitBreakMorphTree hit"); } } #endif #ifdef DEBUG int thisMorphNum = 0; if (verbose && treesBeforeAfterMorph) { thisMorphNum = morphNum++; printf("\nfgMorphTree (before %d):\n", thisMorphNum); gtDispTree(tree); } #endif if (fgGlobalMorph) { // Apply any rewrites for implicit byref arguments before morphing the // tree. if (fgMorphImplicitByRefArgs(tree)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum); gtDispTree(tree); } #endif } } /*------------------------------------------------------------------------- * fgMorphTree() can potentially replace a tree with another, and the * caller has to store the return value correctly. * Turn this on to always make copy of "tree" here to shake out * hidden/unupdated references. */ #ifdef DEBUG if (compStressCompile(STRESS_GENERIC_CHECK, 0)) { GenTree* copy; if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL) { copy = gtNewLargeOperNode(GT_ADD, TYP_INT); } else { copy = new (this, GT_CALL) GenTreeCall(TYP_INT); } copy->ReplaceWith(tree, this); #if defined(LATE_DISASM) // GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle()) { copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle; } #endif DEBUG_DESTROY_NODE(tree); tree = copy; } #endif // DEBUG if (fgGlobalMorph) { /* Ensure that we haven't morphed this node already */ assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); /* Before morphing the tree, we try to propagate any active assertions */ if (optLocalAssertionProp) { /* Do we have any active assertions? */ if (optAssertionCount > 0) { GenTree* newTree = tree; while (newTree != nullptr) { tree = newTree; /* newTree is non-Null if we propagated an assertion */ newTree = optAssertionProp(apFull, tree, nullptr, nullptr); } assert(tree != nullptr); } } PREFAST_ASSUME(tree != nullptr); } /* Save the original un-morphed tree for fgMorphTreeDone */ GenTree* oldTree = tree; /* Figure out what kind of a node we have */ unsigned kind = tree->OperKind(); /* Is this a constant node? */ if (tree->OperIsConst()) { tree = fgMorphConst(tree); goto DONE; } /* Is this a leaf node? */ if (kind & GTK_LEAF) { tree = fgMorphLeaf(tree); goto DONE; } /* Is it a 'simple' unary/binary operator? 
*/ if (kind & GTK_SMPOP) { tree = fgMorphSmpOp(tree, mac); goto DONE; } /* See what kind of a special operator we have here */ switch (tree->OperGet()) { case GT_CALL: if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree = fgMorphCall(tree->AsCall()); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif tree = fgMorphMultiOp(tree->AsMultiOp()); break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) case GT_ARR_ELEM: tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj); unsigned dim; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]); } tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; } if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_ARR_OFFSET: tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset); tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex); tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj); tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT; if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_PHI: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreePhi::Use& use : tree->AsPhi()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT; } break; case GT_FIELD_LIST: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } break; case GT_CMPXCHG: tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation); tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue); tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand); tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL); tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT; break; case GT_STORE_DYN_BLK: tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk()); break; default: #ifdef DEBUG gtDispTree(tree); #endif noway_assert(!"unexpected operator"); } DONE: fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum)); return tree; } //------------------------------------------------------------------------ // fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. 
// void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)) { /* All dependent assertions are killed here */ ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum)); if (killed) { AssertionIndex index = optAssertionCount; while (killed && (index > 0)) { if (BitVecOps::IsMember(apTraits, killed, index - 1)) { #ifdef DEBUG AssertionDsc* curAssertion = optGetAssertion(index); noway_assert((curAssertion->op1.lcl.lclNum == lclNum) || ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { printf("\nThe assignment "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion); } #endif // Remove this bit from the killed mask BitVecOps::RemoveElemD(apTraits, killed, index - 1); optAssertionRemove(index); } index--; } // killed mask should now be zero noway_assert(BitVecOps::IsEmpty(apTraits, killed)); } } //------------------------------------------------------------------------ // fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum. // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. // // Notes: // For structs and struct fields, it will invalidate the children and parent // respectively. // Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar. // void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); // Kill the field locals. for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { fgKillDependentAssertionsSingle(i DEBUGARG(tree)); } // Kill the struct local itself. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } else if (varDsc->lvIsStructField) { // Kill the field local. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); // Kill the parent struct. fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree)); } else { fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } } /***************************************************************************** * * This function is called to complete the morphing of a tree node * It should only be called once for each node. * If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated, * to enforce the invariant that each node is only morphed once. * If local assertion prop is enabled the result tree may be replaced * by an equivalent tree. * */ void Compiler::fgMorphTreeDone(GenTree* tree, GenTree* oldTree /* == NULL */ DEBUGARG(int morphNum)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (after %d):\n", morphNum); gtDispTree(tree); printf(""); // in our logic this causes a flush } #endif if (!fgGlobalMorph) { return; } if ((oldTree != nullptr) && (oldTree != tree)) { /* Ensure that we have morphed this node */ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!"); #ifdef DEBUG TransferTestDataToNode(oldTree, tree); #endif } else { // Ensure that we haven't morphed this node already assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); } if (tree->OperIsConst()) { goto DONE; } if (!optLocalAssertionProp) { goto DONE; } /* Do we have any active assertions? 
*/ if (optAssertionCount > 0) { /* Is this an assignment to a local variable */ GenTreeLclVarCommon* lclVarTree = nullptr; // The check below will miss LIR-style assignments. // // But we shouldn't be running local assertion prop on these, // as local prop gets disabled when we run global prop. assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD)); // DefinesLocal can return true for some BLK op uses, so // check what gets assigned only when we're at an assignment. if (tree->OperIs(GT_ASG) && tree->DefinesLocal(this, &lclVarTree)) { unsigned lclNum = lclVarTree->GetLclNum(); noway_assert(lclNum < lvaCount); fgKillDependentAssertions(lclNum DEBUGARG(tree)); } } /* If this tree makes a new assertion - make it available */ optAssertionGen(tree); DONE:; #ifdef DEBUG /* Mark this node as being morphed */ tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } //------------------------------------------------------------------------ // fgFoldConditional: try and fold conditionals and optimize BBJ_COND or // BBJ_SWITCH blocks. // // Arguments: // block - block to examine // // Returns: // FoldResult indicating what changes were made, if any // Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) { FoldResult result = FoldResult::FOLD_DID_NOTHING; // We don't want to make any code unreachable // if (opts.OptimizationDisabled()) { return result; } if (block->bbJumpKind == BBJ_COND) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the jump entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } // block is a BBJ_COND that we are folding the conditional for. // bTaken is the path that will always be taken from block. // bNotTaken is the path that will never be taken from block.
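// E.g. for "JTRUE(1)" the jump is always taken, so the block becomes BBJ_ALWAYS and
// the fall-through successor is the not-taken path; for "JTRUE(0)" it becomes BBJ_NONE.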
            //
            BasicBlock* bTaken;
            BasicBlock* bNotTaken;

            if (cond->AsIntCon()->gtIconVal != 0)
            {
                /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
                block->bbJumpKind = BBJ_ALWAYS;
                bTaken            = block->bbJumpDest;
                bNotTaken         = block->bbNext;
            }
            else
            {
                /* Unmark the loop if we are removing a backwards branch */
                /* dest block must also be marked as a loop head and     */
                /* We must be able to reach the backedge block           */
                if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
                    fgReachable(block->bbJumpDest, block))
                {
                    optUnmarkLoopBlocks(block->bbJumpDest, block);
                }

                /* JTRUE 0 - transform the basic block into a BBJ_NONE   */
                block->bbJumpKind = BBJ_NONE;
                bTaken            = block->bbNext;
                bNotTaken         = block->bbJumpDest;
            }

            if (fgHaveValidEdgeWeights)
            {
                // We are removing an edge from block to bNotTaken
                // and we have already computed the edge weights, so
                // we will try to adjust some of the weights
                //
                flowList*   edgeTaken = fgGetPredForBlock(bTaken, block);
                BasicBlock* bUpdated  = nullptr; // non-NULL if we updated the weight of an internal block

                // We examine the taken edge (block -> bTaken)
                // if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
                // else if bTaken has valid profile weight and block does not we try to adjust block's weight
                // We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken
                //
                if (block->hasProfileWeight())
                {
                    // The edge weights for (block -> bTaken) are 100% of block's weight
                    edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken);

                    if (!bTaken->hasProfileWeight())
                    {
                        if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight))
                        {
                            // Update the weight of bTaken
                            bTaken->inheritWeight(block);
                            bUpdated = bTaken;
                        }
                    }
                }
                else if (bTaken->hasProfileWeight())
                {
                    if (bTaken->countOfInEdges() == 1)
                    {
                        // There is only one in edge to bTaken
                        edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken);

                        // Update the weight of block
                        block->inheritWeight(bTaken);
                        bUpdated = block;
                    }
                }

                if (bUpdated != nullptr)
                {
                    weight_t newMinWeight;
                    weight_t newMaxWeight;

                    flowList* edge;
                    // Now fix the weights of the edges out of 'bUpdated'
                    switch (bUpdated->bbJumpKind)
                    {
                        case BBJ_NONE:
                            edge         = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
                            newMaxWeight = bUpdated->bbWeight;
                            newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
                            break;

                        case BBJ_COND:
                            edge         = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
                            newMaxWeight = bUpdated->bbWeight;
                            newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
                            FALLTHROUGH;

                        case BBJ_ALWAYS:
                            edge         = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
                            newMaxWeight = bUpdated->bbWeight;
                            newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbJumpDest);
                            break;

                        default:
                            // We don't handle BBJ_SWITCH
                            break;
                    }
                }
            }

            /* modify the flow graph */

            /* Remove 'block' from the predecessor list of 'bNotTaken' */
            fgRemoveRefPred(bNotTaken, block);

#ifdef DEBUG
            if (verbose)
            {
                printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
                printf(FMT_BB " becomes a %s", block->bbNum,
                       block->bbJumpKind == BBJ_ALWAYS ?
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif /* if the block was a loop condition we may have to modify * the loop table */ for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* We are only interested in the loop bottom */ if (optLoopTable[loopNum].lpBottom == block) { if (cond->AsIntCon()->gtIconVal == 0) { /* This was a bogus loop (condition always false) * Remove the loop from the table */ optMarkLoopRemoved(loopNum); optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop")); #ifdef DEBUG if (verbose) { printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum, optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum); } #endif } } } } } else if (block->bbJumpKind == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the switch entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } /* modify the flow graph */ /* Find the actual jump target */ unsigned switchVal; switchVal = (unsigned)cond->AsIntCon()->gtIconVal; unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; bool foundVal; foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { BasicBlock* curJump = *jumpTab; assert(curJump->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { if (curJump != block->bbNext) { /* transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = curJump; } else { /* transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; } foundVal = true; } else { /* Remove 'block' from the predecessor list of 'curJump' */ fgRemoveRefPred(curJump, block); } } assert(foundVal); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif } } return result; } //------------------------------------------------------------------------ // fgMorphBlockStmt: morph a single statement in a block. // // Arguments: // block - block containing the statement // stmt - statement to morph // msg - string to identify caller in a dump // // Returns: // true if 'stmt' was removed from the block. // s false if 'stmt' is still in the block (even if other statements were removed). // // Notes: // Can be called anytime, unlike fgMorphStmts() which should only be called once. // bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)) { assert(block != nullptr); assert(stmt != nullptr); // Reset some ambient state fgRemoveRestOfBlock = false; compCurBB = block; compCurStmt = stmt; GenTree* morph = fgMorphTree(stmt->GetRootNode()); // Bug 1106830 - During the CSE phase we can't just remove // morph->AsOp()->gtOp2 as it could contain CSE expressions. // This leads to a noway_assert in OptCSE.cpp when // searching for the removed CSE ref. (using gtFindLink) // if (!optValnumCSE_phase) { // Check for morph as a GT_COMMA with an unconditional throw if (fgIsCommaThrow(morph, true)) { #ifdef DEBUG if (verbose) { printf("Folding a top-level fgIsCommaThrow stmt\n"); printf("Removing op2 as unreachable:\n"); gtDispTree(morph->AsOp()->gtOp2); printf("\n"); } #endif // Use the call as the new stmt morph = morph->AsOp()->gtOp1; noway_assert(morph->gtOper == GT_CALL); } // we can get a throw as a statement root if (fgIsThrow(morph)) { #ifdef DEBUG if (verbose) { printf("We have a top-level fgIsThrow stmt\n"); printf("Removing the rest of block as unreachable:\n"); } #endif noway_assert((morph->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } } stmt->SetRootNode(morph); // Can the entire tree be removed? bool removedStmt = false; // Defer removing statements during CSE so we don't inadvertently remove any CSE defs. if (!optValnumCSE_phase) { removedStmt = fgCheckRemoveStmt(block, stmt); } // Or this is the last statement of a conditional branch that was just folded? if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock) { FoldResult const fr = fgFoldConditional(block); removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT); } if (!removedStmt) { // Have to re-do the evaluation order since for example some later code does not expect constants as op1 gtSetStmtInfo(stmt); // Have to re-link the nodes for this statement fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed")); gtDispTree(morph); printf("\n"); } #endif if (fgRemoveRestOfBlock) { // Remove the rest of the stmts in the block for (Statement* removeStmt : StatementList(stmt->GetNextStmt())) { fgRemoveStmt(block, removeStmt); } // The rest of block has been removed and we will always throw an exception. // // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. // We should not convert it to a ThrowBB. 
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { // Convert block to a throw bb fgConvertBBToThrowBB(block); } #ifdef DEBUG if (verbose) { printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum); } #endif fgRemoveRestOfBlock = false; } return removedStmt; } /***************************************************************************** * * Morph the statements of the given block. * This function should be called just once for a block. Use fgMorphBlockStmt() * for reentrant calls. */ void Compiler::fgMorphStmts(BasicBlock* block) { fgRemoveRestOfBlock = false; fgCurrentlyInUseArgTemps = hashBv::Create(this); for (Statement* const stmt : block->Statements()) { if (fgRemoveRestOfBlock) { fgRemoveStmt(block, stmt); continue; } #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT && stmt->GetRootNode()->OperGet() == GT_ASG) { fgMorphCombineSIMDFieldAssignments(block, stmt); } #endif fgMorphStmt = stmt; compCurStmt = stmt; GenTree* oldTree = stmt->GetRootNode(); #ifdef DEBUG unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0); if (verbose) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID()); gtDispTree(oldTree); } #endif /* Morph this statement tree */ GenTree* morphedTree = fgMorphTree(oldTree); // mark any outgoing arg temps as free so we can reuse them in the next statement. fgCurrentlyInUseArgTemps->ZeroAll(); // Has fgMorphStmt been sneakily changed ? if ((stmt->GetRootNode() != oldTree) || (block != compCurBB)) { if (stmt->GetRootNode() != oldTree) { /* This must be tailcall. Ignore 'morphedTree' and carry on with the tail-call node */ morphedTree = stmt->GetRootNode(); } else { /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif } noway_assert(compTailCallUsed); noway_assert(morphedTree->gtOper == GT_CALL); GenTreeCall* call = morphedTree->AsCall(); // Could be // - a fast call made as jmp in which case block will be ending with // BBJ_RETURN (as we need epilog) and marked as containing a jmp. // - a tailcall dispatched via JIT helper, on x86, in which case // block will be ending with BBJ_THROW. // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); } #ifdef DEBUG if (compStressCompile(STRESS_CLONE_EXPR, 30)) { // Clone all the trees to stress gtCloneExpr() if (verbose) { printf("\nfgMorphTree (stressClone from):\n"); gtDispTree(morphedTree); } morphedTree = gtCloneExpr(morphedTree); noway_assert(morphedTree != nullptr); if (verbose) { printf("\nfgMorphTree (stressClone to):\n"); gtDispTree(morphedTree); } } /* If the hash value changes. 
we modified the tree during morphing */ if (verbose) { unsigned newHash = gtHashValue(morphedTree); if (newHash != oldHash) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID()); gtDispTree(morphedTree); } } #endif /* Check for morphedTree as a GT_COMMA with an unconditional throw */ if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true)) { /* Use the call as the new stmt */ morphedTree = morphedTree->AsOp()->gtOp1; noway_assert(morphedTree->gtOper == GT_CALL); noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } stmt->SetRootNode(morphedTree); if (fgRemoveRestOfBlock) { continue; } /* Has the statement been optimized away */ if (fgCheckRemoveStmt(block, stmt)) { continue; } /* Check if this block ends with a conditional branch that can be folded */ if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING) { continue; } if (ehBlockHasExnFlowDsc(block)) { continue; } } if (fgRemoveRestOfBlock) { if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; if (op1->OperIsCompare()) { /* Unmark the comparison node with GTF_RELOP_JMP_USED */ op1->gtFlags &= ~GTF_RELOP_JMP_USED; } lastStmt->SetRootNode(fgMorphTree(op1)); } } /* Mark block as a BBJ_THROW block */ fgConvertBBToThrowBB(block); } #if FEATURE_FASTTAILCALL GenTree* recursiveTailCall = nullptr; if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall)) { fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall()); } #endif // Reset this back so that it doesn't leak out impacting other blocks fgRemoveRestOfBlock = false; } /***************************************************************************** * * Morph the blocks of the method. * Returns true if the basic block list is modified. * This function should be called just once. */ void Compiler::fgMorphBlocks() { #ifdef DEBUG if (verbose) { printf("\n*************** In fgMorphBlocks()\n"); } #endif /* Since fgMorphTree can be called after various optimizations to re-arrange * the nodes we need a global flag to signal if we are during the one-pass * global morphing */ fgGlobalMorph = true; // // Local assertion prop is enabled if we are optimized // optLocalAssertionProp = opts.OptimizationEnabled(); if (optLocalAssertionProp) { // // Initialize for local assertion prop // optAssertionInit(true); } if (!compEnregLocals()) { // Morph is checking if lvDoNotEnregister is already set for some optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // this flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. 
lvSetMinOptsDoNotEnreg(); } /*------------------------------------------------------------------------- * Process all basic blocks in the function */ BasicBlock* block = fgFirstBB; noway_assert(block); do { #ifdef DEBUG if (verbose) { printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName); } #endif if (optLocalAssertionProp) { // // Clear out any currently recorded assertion candidates // before processing each basic block, // also we must handle QMARK-COLON specially // optAssertionReset(0); } // Make the current basic block address available globally. compCurBB = block; // Process all statement trees in the basic block. fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { fgMergeBlockReturn(block); } } block = block->bbNext; } while (block != nullptr); // We are done with the global morphing phase fgGlobalMorph = false; compCurBB = nullptr; // Under OSR, we no longer need to specially protect the original method entry // if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED)) { JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum); assert(fgEntryBB->bbRefs > 0); fgEntryBB->bbRefs--; // We don't need to remember this block anymore. fgEntryBB = nullptr; } #ifdef DEBUG if (verboseTrees) { fgDispBasicBlocks(true); } #endif } //------------------------------------------------------------------------ // fgMergeBlockReturn: assign the block return value (if any) into the single return temp // and branch to the single return block. // // Arguments: // block - the block to process. // // Notes: // A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN. // For example a method returning void could have an empty block with jump kind BBJ_RETURN. // Such blocks do materialize as part of in-lining. // // A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN. // It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC. // For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal // is BAD_VAR_NUM. // void Compiler::fgMergeBlockReturn(BasicBlock* block) { assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. Statement* lastStmt = block->lastStmt(); GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr; if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0)) { // This return was generated during epilog merging, so leave it alone } else { // We'll jump to the genReturnBB. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else #endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; } if (genReturnLocal != BAD_VAR_NUM) { // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal. // Method must be returning a value other than TYP_VOID. 
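            // For example (an illustrative sketch of the rewrite, not a dump):
            //     RETURN(expr)                         // in 'block'
            // becomes
            //     ASG(LCL_VAR<genReturnLocal>, expr)   // 'block' now jumps to genReturnBB
            // and the single RETURN(LCL_VAR<genReturnLocal>) lives in genReturnBB.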
noway_assert(compMethodHasRetVal()); // This block must be ending with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); noway_assert(ret != nullptr); // GT_RETURN must have non-null operand as the method is returning the value assigned to // genReturnLocal noway_assert(ret->OperGet() == GT_RETURN); noway_assert(ret->gtGetOp1() != nullptr); Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); } else if (tree->OperIsInitBlkOp()) { tree = fgMorphInitBlock(tree); } if (pAfterStatement == lastStmt) { lastStmt->SetRootNode(tree); } else { // gtNewTempAssign inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); lastStmt = newStmt; } } else if (ret != nullptr && ret->OperGet() == GT_RETURN) { // This block ends with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); // Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn // block noway_assert(ret->TypeGet() == TYP_VOID); noway_assert(ret->gtGetOp1() == nullptr); fgRemoveStmt(block, lastStmt); } JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum); DISPBLOCK(block); if (block->hasProfileWeight()) { weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT; weight_t const newWeight = oldWeight + block->bbWeight; JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight, block->bbNum, genReturnBB->bbNum); genReturnBB->setBBProfileWeight(newWeight); DISPBLOCK(genReturnBB); } } } /***************************************************************************** * * Make some decisions about the kind of code to generate. */ void Compiler::fgSetOptions() { #ifdef DEBUG /* Should we force fully interruptible code ? */ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30)) { noway_assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); } #endif if (opts.compDbgCode) { assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); // debugging is easier this way ... } /* Assume we won't need an explicit stack frame if this is allowed */ if (compLocallocUsed) { codeGen->setFramePointerRequired(true); } #ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); #endif // TARGET_X86 if (!opts.genFPopt) { codeGen->setFramePointerRequired(true); } // Assert that the EH table has been initialized by now. Note that // compHndBBtabAllocCount never decreases; it is a high-water mark // of table allocation. In contrast, compHndBBtabCount does shrink // if we delete a dead EH region, and if it shrinks to zero, the // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); #ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. This would require a few more changes for X86 to use // compHndBBtabCount (the current number of EH clauses) instead of // info.compXcptnsCount (the number of EH clauses in IL), such as // in ehNeedsShadowSPslots(). 
This is because sometimes the IL has
    // an EH clause that we delete as statically dead code before we
    // get here, leaving no EH clauses left, and thus no requirement
    // to use a frame pointer because of EH. But until all the code uses
    // the same test, leave info.compXcptnsCount here.
    if (info.compXcptnsCount > 0)
    {
        codeGen->setFramePointerRequiredEH(true);
    }

#else // !TARGET_X86

    if (compHndBBtabCount > 0)
    {
        codeGen->setFramePointerRequiredEH(true);
    }

#endif // TARGET_X86

#ifdef UNIX_X86_ABI
    if (info.compXcptnsCount > 0)
    {
        assert(!codeGen->isGCTypeFixed());
        // Enforce fully interruptible codegen for funclet unwinding
        SetInterruptible(true);
    }
#endif // UNIX_X86_ABI

    if (compMethodRequiresPInvokeFrame())
    {
        codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
    }

    if (info.compPublishStubParam)
    {
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    if (compIsProfilerHookNeeded())
    {
        codeGen->setFramePointerRequired(true);
    }

    if (info.compIsVarArgs)
    {
        // Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    if (lvaReportParamTypeArg())
    {
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    // printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}

/*****************************************************************************/

GenTree* Compiler::fgInitThisClass()
{
    noway_assert(!compIsForInlining());

    CORINFO_LOOKUP_KIND kind;
    info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);

    if (!kind.needsRuntimeLookup)
    {
        return fgGetSharedCCtor(info.compClassHnd);
    }
    else
    {
#ifdef FEATURE_READYTORUN
        // Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
        if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
        {
            CORINFO_RESOLVED_TOKEN resolvedToken;
            memset(&resolvedToken, 0, sizeof(resolvedToken));

            // We are in a shared method body, but maybe we don't need a runtime lookup after all.
            // This covers the case of a generic method on a non-generic type.
            if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
            {
                resolvedToken.hClass = info.compClassHnd;
                return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
            }

            // We need a runtime lookup.
            GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);

            // CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
            // base of the class that owns the method being compiled". If we're in this method, it means we're not
            // inlining and there's no ambiguity.
            return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
                                             gtNewCallArgs(ctxTree), &kind);
        }
#endif

        // Collectible types require that, for shared generic code, if we use the generic context parameter,
        // we report it. (This is a conservative approach; we could detect some cases, particularly when the
        // context parameter is `this`, where we don't need the eager reporting logic.)
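        //
        // For example (a hypothetical illustration): in shared code such as
        //     static void M<T>() where T : class { ... }
        // the exact class that needs initialization is only identifiable through
        // the runtime generic context, so that context must be kept alive and
        // reported to the runtime.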
lvaGenericsContextInUse = true; switch (kind.runtimeLookupKind) { case CORINFO_LOOKUP_THISOBJ: { // This code takes a this pointer; but we need to pass the static method desc to get the right point in // the hierarchy GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF); vtTree->gtFlags |= GTF_VAR_CONTEXT; // Vtable pointer of this object vtTree = gtNewMethodTableLookup(vtTree); GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd); return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd)); } case CORINFO_LOOKUP_CLASSPARAM: { GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); vtTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree)); } case CORINFO_LOOKUP_METHODPARAM: { GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); methHndTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(gtNewIconNode(0), methHndTree)); } default: noway_assert(!"Unknown LOOKUP_KIND"); UNREACHABLE(); } } } #ifdef DEBUG /***************************************************************************** * * Tree walk callback to make sure no GT_QMARK nodes are present in the tree, * except for the allowed ? 1 : 0; pattern. */ Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data) { if ((*tree)->OperGet() == GT_QMARK) { fgCheckQmarkAllowedForm(*tree); } return WALK_CONTINUE; } void Compiler::fgCheckQmarkAllowedForm(GenTree* tree) { assert(tree->OperGet() == GT_QMARK); assert(!"Qmarks beyond morph disallowed."); } /***************************************************************************** * * Verify that the importer has created GT_QMARK nodes in a way we can * process them. The following is allowed: * * 1. A top level qmark. Top level qmark is of the form: * a) (bool) ? (void) : (void) OR * b) V0N = (bool) ? (type) : (type) * * 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child * of either op1 of colon or op2 of colon but not a child of any other * operator. */ void Compiler::fgPreExpandQmarkChecks(GenTree* expr) { GenTree* topQmark = fgGetTopLevelQmark(expr); // If the top level Qmark is null, then scan the tree to make sure // there are no qmarks within it. if (topQmark == nullptr) { fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } else { // We could probably expand the cond node also, but don't think the extra effort is necessary, // so let's just assert the cond node of a top level qmark doesn't have further top level qmarks. fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2); } } #endif // DEBUG /***************************************************************************** * * Get the top level GT_QMARK node in a given "expr", return NULL if such a * node is not present. If the top level GT_QMARK node is assigned to a * GT_LCL_VAR, then return the lcl node in ppDst. 
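 *
 *  For example (illustrative):
 *      QMARK(cond, COLON(t, f))                       returns the QMARK itself
 *      ASG(LCL_VAR V03, QMARK(cond, COLON(t, f)))     returns the QMARK, *ppDst = V03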
 *
 */
GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
    if (ppDst != nullptr)
    {
        *ppDst = nullptr;
    }

    GenTree* topQmark = nullptr;
    if (expr->gtOper == GT_QMARK)
    {
        topQmark = expr;
    }
    else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
             expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
    {
        topQmark = expr->AsOp()->gtOp2;
        if (ppDst != nullptr)
        {
            *ppDst = expr->AsOp()->gtOp1;
        }
    }
    return topQmark;
}

/*********************************************************************************
 *
 *  For a castclass helper call,
 *  Importer creates the following tree:
 *      tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
 *
 *  This method splits the qmark expression created by the importer into the
 *  following blocks: (block, asg, cond1, cond2, helper, remainder)
 *  Notice that op1 is the result for both the conditions. So we coalesce these
 *  assignments into a single block instead of two blocks resulting a nested diamond.
 *
 *                       +---------->-----------+
 *                       |          |           |
 *                       ^          ^           v
 *                       |          |           |
 *  block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
 *
 *  We expect to achieve the following codegen:
 *     mov      rsi, rdx                           tmp = op1                  // asgBlock
 *     test     rsi, rsi                           goto skip if tmp == null ? // cond1Block
 *     je       SKIP
 *     mov      rcx, 0x76543210                    cns = op2                  // cond2Block
 *     cmp      qword ptr [rsi], rcx               goto skip if *tmp == op2
 *     je       SKIP
 *     call     CORINFO_HELP_CHKCASTCLASS_SPECIAL  tmp = helper(cns, tmp)     // helperBlock
 *     mov      rsi, rax
 *  SKIP:                                                                     // remainderBlock
 *     tmp has the result.
 *
 */
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
        fgDispBasicBlocks(block, block, true);
    }
#endif // DEBUG

    GenTree* expr = stmt->GetRootNode();

    GenTree* dst   = nullptr;
    GenTree* qmark = fgGetTopLevelQmark(expr, &dst);

    noway_assert(dst != nullptr);
    assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);

    // Get cond, true, false exprs for the qmark.
    GenTree* condExpr  = qmark->gtGetOp1();
    GenTree* trueExpr  = qmark->gtGetOp2()->AsColon()->ThenNode();
    GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();

    // Get cond, true, false exprs for the nested qmark.
    GenTree* nestedQmark = falseExpr;
    GenTree* cond2Expr;
    GenTree* true2Expr;
    GenTree* false2Expr;

    if (nestedQmark->gtOper == GT_QMARK)
    {
        cond2Expr  = nestedQmark->gtGetOp1();
        true2Expr  = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
        false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
    }
    else
    {
        // This is a rare case that arises when we are doing minopts and encounter isinst of null.
        // gtFoldExpr was still able to optimize away part of the tree (but not all).
        // That means it does not match our pattern.
        // Rather than write code to handle this case, just fake up some nodes to make it match the common
        // case. Synthesize a comparison that is always true, and for the result-on-true, use the
        // entire subtree we expected to be the nested question op.
        cond2Expr  = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
        true2Expr  = nestedQmark;
        false2Expr = gtNewIconNode(0, TYP_I_IMPL);
    }
    assert(false2Expr->OperGet() == trueExpr->OperGet());

    // Create the chain of blocks. See method header comment.
    // The order of blocks after this is the following:
    //     block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... remainderBlock
    //
    // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
    // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
    // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
    // remainderBlock will still be GC safe.
    BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
    BasicBlock*     remainderBlock = fgSplitBlockAfterStatement(block, stmt);
    fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.

    BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
    BasicBlock* cond2Block  = fgNewBBafter(BBJ_COND, block, true);
    BasicBlock* cond1Block  = fgNewBBafter(BBJ_COND, block, true);
    BasicBlock* asgBlock    = fgNewBBafter(BBJ_NONE, block, true);

    remainderBlock->bbFlags |= propagateFlags;

    // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
    // If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
    if ((block->bbFlags & BBF_INTERNAL) == 0)
    {
        helperBlock->bbFlags &= ~BBF_INTERNAL;
        cond2Block->bbFlags &= ~BBF_INTERNAL;
        cond1Block->bbFlags &= ~BBF_INTERNAL;
        asgBlock->bbFlags &= ~BBF_INTERNAL;
        helperBlock->bbFlags |= BBF_IMPORTED;
        cond2Block->bbFlags |= BBF_IMPORTED;
        cond1Block->bbFlags |= BBF_IMPORTED;
        asgBlock->bbFlags |= BBF_IMPORTED;
    }

    // Chain the flow correctly.
    fgAddRefPred(asgBlock, block);
    fgAddRefPred(cond1Block, asgBlock);
    fgAddRefPred(cond2Block, cond1Block);
    fgAddRefPred(helperBlock, cond2Block);
    fgAddRefPred(remainderBlock, helperBlock);
    fgAddRefPred(remainderBlock, cond1Block);
    fgAddRefPred(remainderBlock, cond2Block);

    cond1Block->bbJumpDest = remainderBlock;
    cond2Block->bbJumpDest = remainderBlock;

    // Set the weights; some are guesses.
    asgBlock->inheritWeight(block);
    cond1Block->inheritWeight(block);
    cond2Block->inheritWeightPercentage(cond1Block, 50);
    helperBlock->inheritWeightPercentage(cond2Block, 50);

    // Append cond1 as JTRUE to cond1Block
    GenTree*   jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr);
    Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(cond1Block, jmpStmt);

    // Append cond2 as JTRUE to cond2Block
    jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr);
    jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(cond2Block, jmpStmt);

    // AsgBlock should get tmp = op1 assignment.
    trueExpr            = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
    Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(asgBlock, trueStmt);

    // Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper.
    gtReverseCond(cond2Expr);
    GenTree*   helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr);
    Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(helperBlock, helperStmt);

    // Finally remove the nested qmark stmt.
    fgRemoveStmt(block, stmt);

    if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN))
    {
        fgConvertBBToThrowBB(helperBlock);
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum);
        fgDispBasicBlocks(block, remainderBlock, true);
    }
#endif // DEBUG
}

/*****************************************************************************
 *
 *  Expand a statement with a top level qmark node. There are three cases, based
 *  on whether the qmark has both "true" and "false" arms, or just one of them.
 *
 *     S0;
 *     C ? T : F;
 *     S1;
 *
 *     Generates ===>
 *
 *                       bbj_always
 *                       +---->------+
 *                 false |           |
 *     S0 -->-- ~C -->-- T   F -->-- S1
 *              |            |
 *              +--->--------+
 *              bbj_cond(true)
 *
 *  -----------------------------------------
 *
 *     S0;
 *     C ? T : NOP;
 *     S1;
 *
 *     Generates ===>
 *
 *                 false
 *     S0 -->-- ~C -->-- T -->-- S1
 *              |                |
 *              +-->-------------+
 *              bbj_cond(true)
 *
 *  -----------------------------------------
 *
 *     S0;
 *     C ? NOP : F;
 *     S1;
 *
 *     Generates ===>
 *
 *                false
 *     S0 -->-- C -->-- F -->-- S1
 *              |               |
 *              +-->------------+
 *              bbj_cond(true)
 *
 *  If the qmark assigns to a variable, then create tmps for "then"
 *  and "else" results and assign the temp to the variable as a writeback step.
 */
void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
{
    GenTree* expr = stmt->GetRootNode();

    // Retrieve the Qmark node to be expanded.
    GenTree* dst   = nullptr;
    GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
    if (qmark == nullptr)
    {
        return;
    }

    if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF)
    {
        fgExpandQmarkForCastInstOf(block, stmt);
        return;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum);
        fgDispBasicBlocks(block, block, true);
    }
#endif // DEBUG

    // Retrieve the operands.
    GenTree* condExpr  = qmark->gtGetOp1();
    GenTree* trueExpr  = qmark->gtGetOp2()->AsColon()->ThenNode();
    GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();

    assert(!varTypeIsFloating(condExpr->TypeGet()));

    bool hasTrueExpr  = (trueExpr->OperGet() != GT_NOP);
    bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
    assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!

    // Create remainder, cond and "else" blocks. After this, the blocks are in this order:
    //     block ... condBlock ... elseBlock ... remainderBlock
    //
    // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
    // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
    // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
    // remainderBlock will still be GC safe.
    BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
    BasicBlock*     remainderBlock = fgSplitBlockAfterStatement(block, stmt);
    fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.

    BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
    BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);

    // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
    // If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
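    //
    // For example (an illustrative sketch): after the split, for "x = C ? T : F;"
    // the order is
    //     block --> condBlock [BBJ_COND] --> elseBlock [BBJ_NONE] --> remainderBlock
    // with a thenBlock inserted below only when both arms are non-empty.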
if ((block->bbFlags & BBF_INTERNAL) == 0) { condBlock->bbFlags &= ~BBF_INTERNAL; elseBlock->bbFlags &= ~BBF_INTERNAL; condBlock->bbFlags |= BBF_IMPORTED; elseBlock->bbFlags |= BBF_IMPORTED; } remainderBlock->bbFlags |= propagateFlags; condBlock->inheritWeight(block); fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) { // bbj_always // +---->------+ // false | | // S0 -->-- ~C -->-- T F -->-- S1 // | | // +--->--------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = elseBlock; thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->bbJumpDest = remainderBlock; if ((block->bbFlags & BBF_INTERNAL) == 0) { thenBlock->bbFlags &= ~BBF_INTERNAL; thenBlock->bbFlags |= BBF_IMPORTED; } fgAddRefPred(thenBlock, condBlock); fgAddRefPred(remainderBlock, thenBlock); thenBlock->inheritWeightPercentage(condBlock, 50); elseBlock->inheritWeightPercentage(condBlock, 50); } else if (hasTrueExpr) { // false // S0 -->-- ~C -->-- T -->-- S1 // | | // +-->-------------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; thenBlock->inheritWeightPercentage(condBlock, 50); } else if (hasFalseExpr) { // false // S0 -->-- C -->-- F -->-- S1 // | | // +-->------------+ // bbj_cond(true) // condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); elseBlock->inheritWeightPercentage(condBlock, 50); } GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1()); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(condBlock, jmpStmt); // Remove the original qmark statement. fgRemoveStmt(block, stmt); // Since we have top level qmarks, we either have a dst for it in which case // we need to create tmps for true and falseExprs, else just don't bother // assigning. unsigned lclNum = BAD_VAR_NUM; if (dst != nullptr) { assert(dst->gtOper == GT_LCL_VAR); lclNum = dst->AsLclVar()->GetLclNum(); } else { assert(qmark->TypeGet() == TYP_VOID); } if (hasTrueExpr) { if (dst != nullptr) { trueExpr = gtNewTempAssign(lclNum, trueExpr); } Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(thenBlock, trueStmt); } // Assign the falseExpr into the dst or tmp, insert in elseBlock if (hasFalseExpr) { if (dst != nullptr) { falseExpr = gtNewTempAssign(lclNum, falseExpr); } Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(elseBlock, falseStmt); } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand GT_QMARK nodes from the flow graph into basic blocks. * */ void Compiler::fgExpandQmarkNodes() { if (compQmarkUsed) { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); #ifdef DEBUG fgPreExpandQmarkChecks(expr); #endif fgExpandQmarkStmt(block, stmt); } } #ifdef DEBUG fgPostExpandQmarkChecks(); #endif } compQmarkRationalized = true; } #ifdef DEBUG /***************************************************************************** * * Make sure we don't have any more GT_QMARK nodes. 
* */ void Compiler::fgPostExpandQmarkChecks() { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } } } #endif /***************************************************************************** * * Promoting struct locals */ void Compiler::fgPromoteStructs() { #ifdef DEBUG if (verbose) { printf("*************** In fgPromoteStructs()\n"); } #endif // DEBUG if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE)) { JITDUMP(" promotion opt flag not enabled\n"); return; } if (fgNoStructPromotion) { JITDUMP(" promotion disabled by JitNoStructPromotion\n"); return; } #if 0 // The code in this #if has been useful in debugging struct promotion issues, by // enabling selective enablement of the struct promotion optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("structpromohashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); } char* histr = getenv("structpromohashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); } if (methHash < methHashLo || methHash > methHashHi) { return; } else { printf("Promoting structs for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // in our logic this causes a flush } #endif // DEBUG #endif // 0 if (info.compIsVarArgs) { JITDUMP(" promotion disabled because of varargs\n"); return; } #ifdef DEBUG if (verbose) { printf("\nlvaTable before fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; // // Loop through the original lvaTable. Looking for struct locals to be promoted. // lvaStructPromotionInfo structPromotionInfo; bool tooManyLocalsReported = false; // Clear the structPromotionHelper, since it is used during inlining, at which point it // may be conservative about looking up SIMD info. // We don't want to preserve those conservative decisions for the actual struct promotion. structPromotionHelper->Clear(); for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { // Whether this var got promoted bool promotedVar = false; LclVarDsc* varDsc = lvaGetDesc(lclNum); // If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote // its fields. Instead, we will attempt to enregister the entire struct. if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc))) { varDsc->lvRegStruct = true; } // Don't promote if we have reached the tracking limit. else if (lvaHaveManyLocals()) { // Print the message first time when we detected this condition if (!tooManyLocalsReported) { JITDUMP("Stopped promoting struct fields, due to too many locals.\n"); } tooManyLocalsReported = true; } else if (varTypeIsStruct(varDsc)) { assert(structPromotionHelper != nullptr); promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum); } if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed) { // Even if we have not used this in a SIMD intrinsic, if it is not being promoted, // we will treat it as a reg struct. 
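            // E.g. (illustrative): a Vector128<float> local whose fields were never
            // accessed individually; keeping it whole lets it live in a single SIMD
            // register instead of being split apart by promotion.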
varDsc->lvRegStruct = true; } } #ifdef DEBUG if (verbose) { printf("\nlvaTable after fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG } void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_FIELD); GenTreeField* field = tree->AsField(); GenTree* objRef = field->GetFldObj(); GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr; noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))); /* Is this an instance data member? */ if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)) { unsigned lclNum = obj->AsLclVarCommon()->GetLclNum(); const LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(obj)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = field->gtFldOffset; unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); if (fieldLclIndex == BAD_VAR_NUM) { // Access a promoted struct's field with an offset that doesn't correspond to any field. // It can happen if the struct was cast to another struct with different offsets. return; } const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex); var_types fieldType = fieldDsc->TypeGet(); assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type. if (tree->TypeGet() != fieldType) { if (tree->TypeGet() != TYP_STRUCT) { // This is going to be an incorrect instruction promotion. // For example when we try to read int as long. return; } if (field->gtFldHnd != fieldDsc->lvFieldHnd) { CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr; CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass); CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass); if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass) { // Access the promoted field with a different class handle, can't check that types match. return; } // Access the promoted field as a field of a non-promoted struct with the same class handle. } else { // As we already checked this above, we must have a tree with a TYP_STRUCT type // assert(tree->TypeGet() == TYP_STRUCT); // The field tree accesses it as a struct, but the promoted LCL_VAR field // says that it has another type. This happens when struct promotion unwraps // a single field struct to get to its ultimate type. // // Note that currently, we cannot have a promoted LCL_VAR field with a struct type. // // This mismatch in types can lead to problems for some parent node type like GT_RETURN. // So we check the parent node and only allow this optimization when we have // a GT_ADDR or a GT_ASG. // // Note that for a GT_ASG we have to do some additional work, // see below after the SetOper(GT_LCL_VAR) // if (!parent->OperIs(GT_ADDR, GT_ASG)) { // Don't transform other operations such as GT_RETURN // return; } #ifdef DEBUG // This is an additional DEBUG-only sanity check // assert(structPromotionHelper != nullptr); structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType); #endif // DEBUG } } tree->SetOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(fieldLclIndex); tree->gtType = fieldType; tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`. 
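                    // At this point the field access has been retyped, e.g. (illustrative):
                    //     FIELD(ADDR(LCL_VAR V01), 'y')  -->  LCL_VAR V03
                    // where V03 is the promoted field local that lvaGetFieldLocal
                    // returned for the offset of 'y'.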
if (parent->gtOper == GT_ASG) { // If we are changing the left side of an assignment, we need to set // these two flags: // if (parent->AsOp()->gtOp1 == tree) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } // Promotion of struct containing struct fields where the field // is a struct with a single pointer sized scalar type field: in // this case struct promotion uses the type of the underlying // scalar field as the type of struct field instead of recursively // promoting. This can lead to a case where we have a block-asgn // with its RHS replaced with a scalar type. Mark RHS value as // DONT_CSE so that assertion prop will not do const propagation. // The reason this is required is that if RHS of a block-asg is a // constant, then it is interpreted as init-block incorrectly. // // TODO - This can also be avoided if we implement recursive struct // promotion, tracked by #10019. if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree)) { tree->gtFlags |= GTF_DONT_CSE; } } #ifdef DEBUG if (verbose) { printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex); } #endif // DEBUG } } else { // Normed struct // A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if // the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8 // bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However, // there is one extremely rare case where that won't be true. An enum type is a special value type // that contains exactly one element of a primitive integer type (that, for CLS programs is named // "value__"). The VM tells us that a local var of that enum type is the primitive type of the // enum's single field. It turns out that it is legal for IL to access this field using ldflda or // ldfld. For example: // // .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum // { // .field public specialname rtspecialname int16 value__ // .field public static literal valuetype mynamespace.e_t one = int16(0x0000) // } // .method public hidebysig static void Main() cil managed // { // .locals init (valuetype mynamespace.e_t V_0) // ... // ldloca.s V_0 // ldflda int16 mynamespace.e_t::value__ // ... // } // // Normally, compilers will not generate the ldflda, since it is superfluous. // // In the example, the lclVar is short, but the JIT promotes all trees using this local to the // "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type // mismatch like this, don't do this morphing. The local var may end up getting marked as // address taken, and the appropriate SHORT load will be done from memory in that case. 
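                // So we only fold when the types already agree, e.g. (illustrative)
                // an INT-typed GT_FIELD of an INT-normed enum local; the mismatched
                // SHORT ldflda/ldfld case above is deliberately left alone.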
if (tree->TypeGet() == obj->TypeGet()) { tree->ChangeOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(lclNum); tree->gtFlags &= GTF_NODE_MASK; if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } #ifdef DEBUG if (verbose) { printf("Replacing the field in normed struct with local var V%02u\n", lclNum); } #endif // DEBUG } } } } void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_LCL_FLD); unsigned lclNum = tree->AsLclFld()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(varDsc)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = tree->AsLclFld()->GetLclOffs(); unsigned fieldLclIndex = 0; LclVarDsc* fldVarDsc = nullptr; if (fldOffset != BAD_VAR_NUM) { fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); noway_assert(fieldLclIndex != BAD_VAR_NUM); fldVarDsc = lvaGetDesc(fieldLclIndex); } var_types treeType = tree->TypeGet(); var_types fieldType = fldVarDsc->TypeGet(); if (fldOffset != BAD_VAR_NUM && ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1))) { // There is an existing sub-field we can use. tree->AsLclFld()->SetLclNum(fieldLclIndex); // The field must be an enregisterable type; otherwise it would not be a promoted field. // The tree type may not match, e.g. for return types that have been morphed, but both // must be enregisterable types. assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType)); tree->ChangeOper(GT_LCL_VAR); assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex); tree->gtType = fldVarDsc->TypeGet(); if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex); } else { // There is no existing field that has all the parts that we need // So we must ensure that the struct lives in memory. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); #ifdef DEBUG // We can't convert this guy to a float because he really does have his // address taken.. varDsc->lvKeepType = 1; #endif // DEBUG } } else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc))) { assert(tree->AsLclFld()->GetLclOffs() == 0); tree->gtType = varDsc->TypeGet(); tree->ChangeOper(GT_LCL_VAR); JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum); } } } //------------------------------------------------------------------------ // fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs void Compiler::fgResetImplicitByRefRefCount() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgResetImplicitByRefRefCount()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsImplicitByRef) { // Clear the ref count field; fgMarkAddressTakenLocals will increment it per // appearance of implicit-by-ref param so that call arg morphing can do an // optimization for single-use implicit-by-ref params whose single use is as // an outgoing call argument. 
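            //
            // For example (an illustrative sketch): a struct arg passed straight
            // through as
            //     Callee(s);
            // has exactly one early appearance, and that appearance is a call
            // argument, so the copy normally made for the call can be elided.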
varDsc->setLvRefCnt(0, RCS_EARLY); varDsc->setLvRefCntWtd(0, RCS_EARLY); } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). Also choose (based on address-exposed analysis) // which struct promotions of implicit byrefs to keep or discard. // For those which are kept, insert the appropriate initialization code. // For those which are to be discarded, annotate the promoted field locals // so that fgMorphImplicitByRefArgs will know to rewrite their appearances // using indirections off the pointer parameters. void Compiler::fgRetypeImplicitByRefArgs() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgRetypeImplicitByRefArgs()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { unsigned size; if (varDsc->lvSize() > REGSIZE_BYTES) { size = varDsc->lvSize(); } else { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); size = info.compCompHnd->getClassSize(typeHnd); } if (varDsc->lvPromoted) { // This implicit-by-ref was promoted; create a new temp to represent the // promoted struct before rewriting this parameter as a pointer. unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref")); lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(newLclNum); } // Update varDsc since lvaGrabTemp might have re-allocated the var dsc array. varDsc = lvaGetDesc(lclNum); // Copy the struct promotion annotations to the new temp. LclVarDsc* newVarDsc = lvaGetDesc(newLclNum); newVarDsc->lvPromoted = true; newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart; newVarDsc->lvFieldCnt = varDsc->lvFieldCnt; newVarDsc->lvContainsHoles = varDsc->lvContainsHoles; newVarDsc->lvCustomLayout = varDsc->lvCustomLayout; #ifdef DEBUG newVarDsc->lvKeepType = true; #endif // DEBUG // Propagate address-taken-ness and do-not-enregister-ness. newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason())); newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister; newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr; newVarDsc->lvSingleDef = varDsc->lvSingleDef; newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate; newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef; #ifdef DEBUG newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason()); #endif // DEBUG // If the promotion is dependent, the promoted temp would just be committed // to memory anyway, so we'll rewrite its appearances to be indirections // through the pointer parameter, the same as we'd do for this // parameter if it weren't promoted at all (otherwise the initialization // of the new temp would just be a needless memcpy at method entry). // // Otherwise, see how many appearances there are. We keep two early ref counts: total // number of references to the struct or some field, and how many of these are // arguments to calls. We undo promotion unless we see enough non-call uses. 
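                // Worked example (illustrative): for a 4-field struct arg with
                // totalAppearances == 6 and callAppearances == 2, we get
                // nonCallAppearances == 4 <= lvFieldCnt (4), so promotion is
                // undone; with one more non-call use it would be kept.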
// const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); const unsigned nonCallAppearances = totalAppearances - callAppearances; bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) || (nonCallAppearances <= varDsc->lvFieldCnt)); #ifdef DEBUG // Above is a profitability heuristic; either value of // undoPromotion should lead to correct code. So, // under stress, make different decisions at times. if (compStressCompile(STRESS_BYREF_PROMOTION, 25)) { undoPromotion = !undoPromotion; JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum, undoPromotion ? "" : "NOT"); } #endif // DEBUG JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n", undoPromotion ? "Undoing" : "Keeping", lclNum, (lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "", totalAppearances, nonCallAppearances, varDsc->lvFieldCnt); if (!undoPromotion) { // Insert IR that initializes the temp from the parameter. // LHS is a simple reference to the temp. fgEnsureFirstBBisScratch(); GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType); // RHS is an indirection (using GT_BLK) off the parameter. GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size)); GenTree* assign = gtNewAssignNode(lhs, rhs); fgNewStmtAtBeg(fgFirstBB, assign); } // Update the locals corresponding to the promoted fields. unsigned fieldLclStart = varDsc->lvFieldLclStart; unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); if (undoPromotion) { // Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs // will know to rewrite appearances of this local. assert(fieldVarDsc->lvParentLcl == lclNum); } else { // Set the new parent. fieldVarDsc->lvParentLcl = newLclNum; } fieldVarDsc->lvIsParam = false; // The fields shouldn't inherit any register preferences from // the parameter which is really a pointer to the struct. fieldVarDsc->lvIsRegArg = false; fieldVarDsc->lvIsMultiRegArg = false; fieldVarDsc->SetArgReg(REG_NA); #if FEATURE_MULTIREG_ARGS fieldVarDsc->SetOtherArgReg(REG_NA); #endif } // Hijack lvFieldLclStart to record the new temp number. // It will get fixed up in fgMarkDemotedImplicitByRefArgs. varDsc->lvFieldLclStart = newLclNum; // Go ahead and clear lvFieldCnt -- either we're promoting // a replacement temp or we're not promoting this arg, and // in either case the parameter is now a pointer that doesn't // have these fields. varDsc->lvFieldCnt = 0; // Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs // whether references to the struct should be rewritten as // indirections off the pointer (not promoted) or references // to the new struct local (promoted). varDsc->lvPromoted = !undoPromotion; } else { // The "undo promotion" path above clears lvPromoted for args that struct // promotion wanted to promote but that aren't considered profitable to // rewrite. It hijacks lvFieldLclStart to communicate to // fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left // on such args for fgMorphImplicitByRefArgs to consult in the interim.
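// Editor's recap of the annotation protocol left behind here (for illustration):
//   lvPromoted == true                          -> appearances rewritten to the new
//                                                  struct temp named by lvFieldLclStart
//   lvPromoted == false, lvFieldLclStart != 0   -> promotion undone; temp recorded for
//                                                  cleanup in fgMarkDemotedImplicitByRefArgs
//   lvPromoted == false, lvFieldLclStart == 0   -> never promoted; appearances rewritten
//                                                  as indirections off the pointer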
// Here we have an arg that was simply never promoted, so make sure it doesn't // have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs // and fgMarkDemotedImplicitByRefArgs. assert(varDsc->lvFieldLclStart == 0); } // Since the parameter in this position is really a pointer, its type is TYP_BYREF. varDsc->lvType = TYP_BYREF; // Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF, // make sure that the following flag is not set, as it would force SSA to // exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa) // varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it. // The struct parameter may have had its address taken, but the pointer parameter // cannot -- any uses of the struct parameter's address are uses of the pointer // parameter's value, and there's no way for the MSIL to reference the pointer // parameter's address. So clear the address-taken bit for the parameter. varDsc->CleanAddressExposed(); varDsc->lvDoNotEnregister = 0; #ifdef DEBUG // This should not be converted to a double in stress mode, // because it is really a pointer. varDsc->lvKeepType = 1; if (verbose) { printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum); } #endif // DEBUG } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion // asked to promote. Appearances of these have now been rewritten // (by fgMorphImplicitByRefArgs) using indirections from the pointer // parameter or references to the promotion temp, as appropriate. void Compiler::fgMarkDemotedImplicitByRefArgs() { JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n"); #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { JITDUMP("Clearing annotation for V%02d\n", lclNum); if (varDsc->lvPromoted) { // The parameter is simply a pointer now, so clear lvPromoted. It was left set // by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that // appearances of this arg needed to be rewritten to a new promoted struct local. varDsc->lvPromoted = false; // Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs // to tell fgMorphImplicitByRefArgs which local is the new promoted struct one. varDsc->lvFieldLclStart = 0; } else if (varDsc->lvFieldLclStart != 0) { // We created new temps to represent a promoted struct corresponding to this // parameter, but decided not to go through with the promotion and have // rewritten all uses as indirections off the pointer parameter. // We stashed the pointer to the new struct temp in lvFieldLclStart; make // note of that and clear the annotation. unsigned structLclNum = varDsc->lvFieldLclStart; varDsc->lvFieldLclStart = 0; // The temp struct is now unused; set flags appropriately so that we // won't allocate space for it on the stack.
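// For example (hypothetical local numbers, editor's note): if V01 was the implicit
// byref parameter and V05 the abandoned promotion temp with fields V06/V07, the code
// below reparents V06/V07 from V01 to V05 and clears address exposure so that no frame
// space is reserved for the temp or its fields.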
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum); structVarDsc->CleanAddressExposed(); #ifdef DEBUG structVarDsc->lvUnusedStruct = true; structVarDsc->lvUndoneStructPromotion = true; #endif // DEBUG unsigned fieldLclStart = structVarDsc->lvFieldLclStart; unsigned fieldCount = structVarDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum); // Fix the pointer to the parent local. LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); assert(fieldVarDsc->lvParentLcl == lclNum); fieldVarDsc->lvParentLcl = structLclNum; // The field local is now unused; set flags appropriately so that // we won't allocate stack space for it. fieldVarDsc->CleanAddressExposed(); } } } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** * * Morph irregular parameters * for x64 and ARM64 this means turning them into byrefs, adding extra indirs. */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { #if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; #else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; // Implicit byref morphing needs to know if the reference to the parameter is a // child of GT_ADDR or not, so this method looks one level down and does the // rewrite whenever a child is a reference to an implicit byref parameter. if (tree->gtOper == GT_ADDR) { if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true); changed = (morphedTree != nullptr); assert(!changed || (morphedTree == tree)); } } else { for (GenTree** pTree : tree->UseEdges()) { GenTree** pTreeCopy = pTree; GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { changed = true; *pTreeCopy = newChildTree; } } } } return changed; #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) { assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR))); assert(isAddr == (tree->gtOper == GT_ADDR)); GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree; unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* lclVarDsc = lvaGetDesc(lclNum); CORINFO_FIELD_HANDLE fieldHnd; unsigned fieldOffset = 0; var_types fieldRefType = TYP_UNKNOWN; if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will // re-invoke the traversal to mark address-taken locals. // So, we may encounter a tree that has already been transformed to TYP_BYREF. // If we do, leave it as-is. if (!varTypeIsStruct(lclVarTree)) { assert(lclVarTree->TypeGet() == TYP_BYREF); return nullptr; } else if (lclVarDsc->lvPromoted) { // fgRetypeImplicitByRefArgs created a new promoted struct local to represent this // arg. Rewrite this to refer to the new local. 
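// E.g. (hypothetical numbering, editor's note): a use of implicit-byref arg V01 whose
// kept promotion temp is V05 simply becomes a use of V05 here; the pointer in V01 is
// only read by the entry-block copy inserted in fgRetypeImplicitByRefArgs.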
assert(lclVarDsc->lvFieldLclStart != 0); lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart); return tree; } fieldHnd = nullptr; } else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl)) { // This was a field reference to an implicit-by-reference struct parameter that was // dependently promoted; update it to a field reference off the pointer. // Grab the field handle from the struct field lclVar. fieldHnd = lclVarDsc->lvFieldHnd; fieldOffset = lclVarDsc->lvFldOffset; assert(fieldHnd != nullptr); // Update lclNum/lclVarDsc to refer to the parameter lclNum = lclVarDsc->lvParentLcl; lclVarDsc = lvaGetDesc(lclNum); fieldRefType = lclVarTree->TypeGet(); } else { // We only need to transform the 'marked' implicit by ref parameters return nullptr; } // This is no longer a def of the lclVar, even if it WAS a def of the struct. lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK); if (isAddr) { if (fieldHnd == nullptr) { // change &X into just plain X tree->ReplaceWith(lclVarTree, this); tree->gtType = TYP_BYREF; } else { // change &(X.f) [i.e. GT_ADDR of local for promoted arg field] // into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param] lclVarTree->AsLclVarCommon()->SetLclNum(lclNum); lclVarTree->gtType = TYP_BYREF; tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset); } #ifdef DEBUG if (verbose) { printf("Replacing address of implicit by ref struct parameter with byref:\n"); } #endif // DEBUG } else { // Change X into OBJ(X) or FIELD(X, f) var_types structType = tree->gtType; tree->gtType = TYP_BYREF; if (fieldHnd) { tree->AsLclVarCommon()->SetLclNum(lclNum); tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset); } else { tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree); if (structType == TYP_STRUCT) { gtSetObjGcInfo(tree->AsObj()); } } // TODO-CQ: If the VM ever stops violating the ABI and passing heap references // we could remove TGTANYWHERE tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE); #ifdef DEBUG if (verbose) { printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n"); } #endif // DEBUG } #ifdef DEBUG if (verbose) { gtDispTree(tree); } #endif // DEBUG return tree; } //------------------------------------------------------------------------ // fgAddFieldSeqForZeroOffset: // Associate a fieldSeq (with a zero offset) with the GenTree node 'addr' // // Arguments: // addr - A GenTree node // fieldSeqZero - a fieldSeq (with a zero offset) // // Notes: // Some GenTree nodes have internal fields that record the field sequence. // If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD // we can append the field sequence using the gtFieldSeq // If we have a GT_ADD of a GT_CNS_INT we can use the // fieldSeq from child node. // Otherwise we record 'fieldSeqZero' in the GenTree node using // a Map: GetFieldSeqStore() // When doing so we take care to preserve any existing zero field sequence // void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero) { // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Tunnel through any commas. const bool commaOnly = true; addr = addr->gtEffectiveVal(commaOnly); // We still expect 'addr' to be an address at this point.
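// Illustrative shapes handled by the switch below (editor's note): a GT_CNS_INT that
// carries its own field sequence, an ADDR(LCL_FLD) whose local field carries one, or an
// ADD whose GT_CNS_INT operand carries one; anything else falls back to the side map
// keyed by the address node itself.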
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); FieldSeqNode* fieldSeqUpdate = fieldSeqZero; GenTree* fieldSeqNode = addr; bool fieldSeqRecorded = false; #ifdef DEBUG if (verbose) { printf("\nfgAddFieldSeqForZeroOffset for"); gtDispAnyFieldSeq(fieldSeqZero); printf("\naddr (Before)\n"); gtDispNode(addr, nullptr, nullptr, false); gtDispCommonEndLine(addr); } #endif // DEBUG switch (addr->OperGet()) { case GT_CNS_INT: fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; break; case GT_ADDR: if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD) { fieldSeqNode = addr->AsOp()->gtOp1; GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld(); fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero); lclFld->SetFieldSeq(fieldSeqUpdate); fieldSeqRecorded = true; } break; case GT_ADD: if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp1; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp2; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } break; default: break; } if (fieldSeqRecorded == false) { // Record in the general zero-offset map. // The "addr" node might already be annotated with a zero-offset field sequence. FieldSeqNode* existingFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq)) { // Append the zero field sequences fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero); } // Overwrite the field sequence annotation for op1 GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite); fieldSeqRecorded = true; } #ifdef DEBUG if (verbose) { printf(" (After)\n"); gtDispNode(fieldSeqNode, nullptr, nullptr, false); gtDispCommonEndLine(fieldSeqNode); } #endif // DEBUG } #ifdef FEATURE_SIMD //----------------------------------------------------------------------------------- // fgMorphCombineSIMDFieldAssignments: // If the RHS of the input stmt is a read of SIMD vector field X, then this function // will keep reading the next few stmts based on the vector size (2, 3, 4). // If the LHS of the next stmts are located contiguously, and the RHS are also located // contiguously, then we replace those statements with a single copyblk. // // Argument: // block - BasicBlock*. block which stmt belongs to // stmt - Statement*. the stmt node we want to check // // return value: // if this function successfully optimized the stmts, then return true. Otherwise return false.
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt) { GenTree* tree = stmt->GetRootNode(); assert(tree->OperGet() == GT_ASG); GenTree* originalLHS = tree->AsOp()->gtOp1; GenTree* prevLHS = tree->AsOp()->gtOp1; GenTree* prevRHS = tree->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true); if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT) { // If the RHS is not from a SIMD vector field X, then there is no need to check further. return false; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); var_types simdType = getSIMDTypeForSize(simdSize); int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1; int remainingAssignments = assignmentsCount; Statement* curStmt = stmt->GetNextStmt(); Statement* lastStmt = stmt; while (curStmt != nullptr && remainingAssignments > 0) { GenTree* exp = curStmt->GetRootNode(); if (exp->OperGet() != GT_ASG) { break; } GenTree* curLHS = exp->gtGetOp1(); GenTree* curRHS = exp->gtGetOp2(); if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS)) { break; } remainingAssignments--; prevLHS = curLHS; prevRHS = curRHS; lastStmt = curStmt; curStmt = curStmt->GetNextStmt(); } if (remainingAssignments > 0) { // If the number of remaining assignments is greater than zero, then the // assignments are not storing to contiguous memory locations from the same vector. return false; } #ifdef DEBUG if (verbose) { printf("\nFound contiguous assignments from a SIMD vector to memory.\n"); printf("From " FMT_BB ", stmt ", block->bbNum); printStmtID(stmt); printf(" to stmt"); printStmtID(lastStmt); printf("\n"); } #endif for (int i = 0; i < assignmentsCount; i++) { fgRemoveStmt(block, stmt->GetNextStmt()); } GenTree* dstNode; if (originalLHS->OperIs(GT_LCL_FLD)) { dstNode = originalLHS; dstNode->gtType = simdType; dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); // This may have changed a partial local field into a full local field if (dstNode->IsPartialLclFld(this)) { dstNode->gtFlags |= GTF_VAR_USEASG; } else { dstNode->gtFlags &= ~GTF_VAR_USEASG; } } else { GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize); if (simdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(simdStructNode); } GenTree* copyBlkAddr = copyBlkDst; if (copyBlkAddr->gtOper == GT_LEA) { copyBlkAddr = copyBlkAddr->AsAddrMode()->Base(); } GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr(); if (localDst != nullptr) { setLclRelatedToSIMDIntrinsic(localDst); } if (simdStructNode->TypeGet() == TYP_BYREF) { assert(simdStructNode->OperIsLocal()); assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum())); simdStructNode = gtNewIndir(simdType, simdStructNode); } else { assert(varTypeIsSIMD(simdStructNode)); } dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst); } #ifdef DEBUG if (verbose) { printf("\n" FMT_BB " stmt ", block->bbNum); printStmtID(stmt); printf("(before)\n"); gtDispStmt(stmt); } #endif assert(!simdStructNode->CanCSE()); simdStructNode->ClearDoNotCSE(); tree = gtNewAssignNode(dstNode, simdStructNode); stmt->SetRootNode(tree); // Since we generated a new address node which didn't exist before, // we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all // local field access into LCL_FLDs, at that point we would be // combining 2 existing LCL_FLDs or 2 FIELDs that do not reference // a local and thus cannot result in a new address exposed local. fgMarkAddressExposedLocals(stmt); #ifdef DEBUG if (verbose) { printf("\nReplaced " FMT_BB " stmt", block->bbNum); printStmtID(stmt); printf("(after)\n"); gtDispStmt(stmt); } #endif return true; } #endif // FEATURE_SIMD //------------------------------------------------------------------------ // fgCheckStmtAfterTailCall: check that statements after the tail call stmt // candidate are in one of the expected forms described below. // // Return Value: // 'true' if stmts are in the expected form, else 'false'. // bool Compiler::fgCheckStmtAfterTailCall() { // For void calls, we would have created a GT_CALL in the stmt list. // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)). // For calls returning structs, we would have a void call, followed by a void return. // For debuggable code, it would be an assignment of the call to a temp // We want to get rid of these extra trees, and just leave // the call. Statement* callStmt = fgMorphStmt; Statement* nextMorphStmt = callStmt->GetNextStmt(); // Check that the remaining stmts in the block are in one of the following patterns: // 1) ret(void) // 2) ret(cast*(callResultLclVar)) // 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block // 4) nop if (nextMorphStmt != nullptr) { GenTree* callExpr = callStmt->GetRootNode(); if (callExpr->gtOper != GT_ASG) { // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar), // where lclVar was the return buffer in the call for structs or simd. Statement* retStmt = nextMorphStmt; GenTree* retExpr = retStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); nextMorphStmt = retStmt->GetNextStmt(); } else { noway_assert(callExpr->gtGetOp1()->OperIsLocal()); unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum(); #if FEATURE_TAILCALL_OPT_SHARED_RETURN // We can have a chain of assignments from the call result to // various inline return spill temps. These are ok as long // as the last one ultimately provides the return value or is ignored. // // And if we're returning a small type we may see a cast // on the source side. while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP))) { if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP)) { nextMorphStmt = nextMorphStmt->GetNextStmt(); continue; } Statement* moveStmt = nextMorphStmt; GenTree* moveExpr = nextMorphStmt->GetRootNode(); GenTree* moveDest = moveExpr->gtGetOp1(); noway_assert(moveDest->OperIsLocal()); // Tunnel through any casts on the source side. GenTree* moveSource = moveExpr->gtGetOp2(); while (moveSource->OperIs(GT_CAST)) { noway_assert(!moveSource->gtOverflow()); moveSource = moveSource->gtGetOp1(); } noway_assert(moveSource->OperIsLocal()); // Verify we're just passing the value from one local to another // along the chain.
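// For instance (hypothetical temps, editor's note), the statements after a candidate
// call in a shared-return method may look like:
//
//     V03 = CALL ...              ; the tail call candidate (fgMorphStmt)
//     V04 = V03                   ; inline return spill temp
//     RETURN CAST(short, V04)     ; small-type normalization on the return
//
// and each link is checked to read the previous local in the chain.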
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum(); noway_assert(srcLclNum == callResultLclNumber); const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum(); callResultLclNumber = dstLclNum; nextMorphStmt = moveStmt->GetNextStmt(); } if (nextMorphStmt != nullptr) #endif { Statement* retStmt = nextMorphStmt; GenTree* retExpr = nextMorphStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); GenTree* treeWithLcl = retExpr->gtGetOp1(); while (treeWithLcl->gtOper == GT_CAST) { noway_assert(!treeWithLcl->gtOverflow()); treeWithLcl = treeWithLcl->gtGetOp1(); } noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum()); nextMorphStmt = retStmt->GetNextStmt(); } } } return nextMorphStmt == nullptr; } //------------------------------------------------------------------------ // fgCanTailCallViaJitHelper: check whether we can use the faster tailcall // JIT helper on x86. // // Return Value: // 'true' if we can; or 'false' if we should use the generic tailcall mechanism. // bool Compiler::fgCanTailCallViaJitHelper() { #if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN) // On anything except windows X86 we have no faster mechanism available. return false; #else // The JIT helper does not properly handle the case where localloc was used. if (compLocallocUsed) return false; return true; #endif } //------------------------------------------------------------------------ // fgMorphReduceAddOps: reduce successive variable adds into a single multiply, // e.g., i + i + i + i => i * 4. // // Arguments: // tree - tree for reduction // // Return Value: // reduced tree if pattern matches, original tree otherwise // GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree) { // ADD(_, V0) starts the pattern match. if (!tree->OperIs(GT_ADD) || tree->gtOverflow()) { return tree; } #ifndef TARGET_64BIT // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing // ADD ops with a helper function call. Don't apply optimization in that case. if (tree->TypeGet() == TYP_LONG) { return tree; } #endif GenTree* lclVarTree = tree->AsOp()->gtOp2; GenTree* consTree = tree->AsOp()->gtOp1; GenTree* op1 = consTree; GenTree* op2 = lclVarTree; if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2)) { return tree; } int foldCount = 0; unsigned lclNum = op2->AsLclVarCommon()->GetLclNum(); // Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum). while (true) { // ADD(lclNum, lclNum), end of tree if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount += 2; break; } // ADD(ADD(X, Y), lclNum), keep descending else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount++; op2 = op1->AsOp()->gtOp2; op1 = op1->AsOp()->gtOp1; } // Any other case is a pattern we won't attempt to fold for now. else { return tree; } } // V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize // accordingly consTree->BashToConst(foldCount, tree->TypeGet()); GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree); DEBUG_DESTROY_NODE(tree); return morphed; }
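// For illustration (editor's note): morphing `i + i + i + i` sees
// ADD(ADD(ADD(i, i), i), i); the loop above counts four appearances of the local and
// the tree is rebuilt as MUL(i, 4), which the post-order transforms can then lower
// further (e.g. to a left shift by 2).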
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Morph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Convert the given node into a call to the specified helper passing // the given argument list. // // Tries to fold constants and also adds an edge for overflow exception // returns the morphed tree GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper) { GenTree* result; /* If the operand is a constant, we'll try to fold it */ if (oper->OperIsConst()) { GenTree* oldTree = tree; tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...) if (tree != oldTree) { return fgMorphTree(tree); } else if (tree->OperIsConst()) { return fgMorphConst(tree); } // assert that oper is unchanged and that it is still a GT_CAST node noway_assert(tree->AsCast()->CastOp() == oper); noway_assert(tree->gtOper == GT_CAST); } result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper)); assert(result == tree); return result; } /***************************************************************************** * * Convert the given node into a call to the specified helper passing * the given argument list. */ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs) { // The helper call ought to be semantically equivalent to the original node, so preserve its VN. tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN); GenTreeCall* call = tree->AsCall(); call->gtCallType = CT_HELPER; call->gtReturnType = tree->TypeGet(); call->gtCallMethHnd = eeFindHelper(helper); call->gtCallThisArg = nullptr; call->gtCallArgs = args; call->gtCallLateArgs = nullptr; call->fgArgInfo = nullptr; call->gtRetClsHnd = nullptr; call->gtCallMoreFlags = GTF_CALL_M_EMPTY; call->gtInlineCandidateInfo = nullptr; call->gtControlExpr = nullptr; #ifdef UNIX_X86_ABI call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI #if DEBUG // Helper calls are never candidates. call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; call->callSig = nullptr; #endif // DEBUG #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif #if FEATURE_MULTIREG_RET call->ResetReturnType(); call->ClearOtherRegs(); call->ClearOtherRegFlags(); #ifndef TARGET_64BIT if (varTypeIsLong(tree)) { call->InitializeLongReturnType(); } #endif // !TARGET_64BIT #endif // FEATURE_MULTIREG_RET if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree->gtFlags |= GTF_CALL; for (GenTreeCall::Use& use : GenTreeCall::UseList(args)) { tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } /* Perform the morphing */ if (morphArgs) { tree = fgMorphArgs(call); } return tree; } //------------------------------------------------------------------------ // fgMorphExpandCast: Performs the pre-order (required) morphing for a cast. // // Performs a rich variety of pre-order transformations (and some optimizations). // // Notably: // 1. 
Splits long -> small type casts into long -> int -> small type // for 32 bit targets. Does the same for float/double -> small type // casts for all targets. // 2. Morphs casts not supported by the target directly into helpers. // These mostly have to do with casts from and to floating point // types, especially checked ones. Refer to the implementation for // what specific casts need to be handled - it is a complex matrix. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via // assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC // temporary. // 4. "Pushes down" truncating long -> int casts for some operations: // CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)). // The purpose of this is to allow "optNarrowTree" in the post-order // traversal to fold the tree into a TYP_INT one, which helps 32 bit // targets (and AMD64 too since 32 bit instructions are more compact). // TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64. // // Arguments: // tree - the cast tree to morph // // Return Value: // The fully morphed tree, or "nullptr" if it needs further morphing, // in which case the cast may be transformed into an unchecked one // and its operand changed (the cast "expanded" into two). // GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) { GenTree* oper = tree->CastOp(); if (fgGlobalMorph && (oper->gtOper == GT_ADDR)) { // Make sure we've checked if 'oper' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast // morphing code to see that type. fgMorphImplicitByRefArgs(oper); } var_types srcType = genActualType(oper); var_types dstType = tree->CastToType(); unsigned dstSize = genTypeSize(dstType); // See if the cast has to be done in two steps. R -> I if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { if (srcType == TYP_FLOAT #if defined(TARGET_ARM64) // Arm64: src = float, dst is overflow conversion. // This goes through helper and hence src needs to be converted to double. && tree->gtOverflow() #elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. // This goes through helper and hence src needs to be converted to double. && (tree->gtOverflow() || (dstType == TYP_ULONG)) #elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } // Do we need to do it in two steps R -> I -> smallType? if (dstSize < genTypeSize(TYP_INT)) { oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->AsCast()->CastOp() = oper; // We must not mistreat the original cast, which was from a floating point type, // as from an unsigned type, since we now have a TYP_INT node for the source and // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT). assert(!tree->IsUnsigned()); } else { if (!tree->gtOverflow()) { #ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly.
return nullptr; #else switch (dstType) { case TYP_INT: return nullptr; case TYP_UINT: #if defined(TARGET_ARM) || defined(TARGET_AMD64) return nullptr; #else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); #endif // TARGET_X86 case TYP_LONG: #ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long return nullptr; #else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); #endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: unreached(); } #endif // TARGET_ARM64 } else { switch (dstType) { case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); default: unreached(); } } } } #ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) { oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->AsCast()->CastOp() = oper; } #endif //! TARGET_64BIT #ifdef TARGET_ARMARCH // AArch, unlike x86/amd64, has instructions that can cast directly from // all integers (except for longs on AArch32 of course) to floats. // Because there is no IL instruction conv.r4.un, uint/ulong -> float // casts are always imported as CAST(float <- CAST(double <- uint/ulong)). // We can eliminate the redundant intermediate cast as an optimization. else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; return fgMorphTree(oper); } #endif // TARGET_ARMARCH #ifdef TARGET_ARM // converts long/ulong --> float/double casts into helper calls. else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) { if (dstType == TYP_FLOAT) { // there is only a double helper, so we // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } if (tree->gtFlags & GTF_UNSIGNED) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } #endif // TARGET_ARM #ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 // b) U8 -> R8 // // The following conversions are performed as two-step operations using above. // U4 -> R4/8 = U4-> Long -> R4/8 // U8 -> R4 = U8 -> R8 -> R4 else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { if (dstType == TYP_FLOAT) { // Codegen can handle U8 -> R8 conversion. 
// U8 -> R4 = U8 -> R8 -> R4 // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->CastOp() = oper; } } #endif // TARGET_AMD64 #ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } } else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) { oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); // Since we don't have a Jit Helper that converts to a TYP_FLOAT // we just use the one that converts to a TYP_DOUBLE // and then add a cast to TYP_FLOAT // if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL)) { // Fix the return type to be TYP_DOUBLE // oper->gtType = TYP_DOUBLE; // Add a Cast to TYP_FLOAT // tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } else { return oper; } } #endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. We would like to just // change the type to int, however this gives the emitter fits because // it believes the variable is a GC variable at the beginning of the // instruction group but is not turned non-GC by the code generator. // We fix this by copying the GC pointer to a non-GC pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); // We generate an assignment to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; GenTree* asg = gtNewTempAssign(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); return fgMorphTree(oper); } // Look for narrowing casts ([u]long -> [u]int) and try to push them // down into the operand before morphing it. // // It doesn't matter if this cast is from ulong or long (i.e. if // GTF_UNSIGNED is set) because the transformation is only applied to // overflow-insensitive narrowing casts, which always silently truncate. // // Note that casts from [u]long to small integer types are handled above. if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT))) { // As a special case, look for overflow-sensitive casts of an AND // expression, and see if the second operand is a small constant. Since // the result of an AND is bound by its smaller operand, it may be // possible to prove that the cast won't overflow, which will in turn // allow the cast's operand to be transformed.
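// Example (editor's sketch): for CAST_OVF(uint <- AND(x, 0xFF)) the AND result fits in
// 8 bits and so can never exceed the target range; the overflow check is removed below,
// which in turn makes the operand eligible for the narrowing push-down that follows.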
if (tree->gtOverflow() && (oper->OperGet() == GT_AND)) { GenTree* andOp2 = oper->AsOp()->gtOp2; // Look for a constant less than 2^{32} for a cast to uint, or less // than 2^{31} for a cast to int. int maxWidth = (dstType == TYP_UINT) ? 32 : 31; if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0)) { tree->ClearOverflow(); tree->SetAllEffectsFlags(oper); } } // Only apply this transformation during global morph, // when neither the cast node nor the oper node may throw an exception // based on the upper 32 bits. // if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx()) { // For these operations the lower 32 bits of the result only depends // upon the lower 32 bits of the operands. // bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG); // For long LSH cast to int, there is a discontinuity in behavior // when the shift amount is 32 or larger. // // CAST(INT, LSH(1LL, 31)) == LSH(1, 31) // LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31) // // CAST(INT, LSH(1LL, 32)) == 0 // LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1 // // So some extra validation is needed. // if (oper->OperIs(GT_LSH)) { GenTree* shiftAmount = oper->AsOp()->gtOp2; // Expose constant value for shift, if possible, to maximize the number // of cases we can handle. shiftAmount = gtFoldExpr(shiftAmount); oper->AsOp()->gtOp2 = shiftAmount; #if DEBUG // We may remorph the shift amount tree again later, so clear any morphed flag. shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG if (shiftAmount->IsIntegralConst()) { const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue(); if ((shiftAmountValue >= 64) || (shiftAmountValue < 0)) { // Shift amount is large enough or negative so result is undefined. // Don't try to optimize. assert(!canPushCast); } else if (shiftAmountValue >= 32) { // We know that we have a narrowing cast ([u]long -> [u]int) // and that we are casting to a 32-bit value, which will result in zero. // // Check to see if we have any side-effects that we must keep // if ((tree->gtFlags & GTF_ALL_EFFECT) == 0) { // Result of the shift is zero. DEBUG_DESTROY_NODE(tree); GenTree* zero = gtNewZeroConNode(TYP_INT); return fgMorphTree(zero); } else // We do have a side-effect { // We could create a GT_COMMA node here to keep the side-effect and return a zero // Instead we just don't try to optimize this case. canPushCast = false; } } else { // Shift amount is positive and small enough that we can push the cast through. canPushCast = true; } } else { // Shift amount is unknown. We can't optimize this case. assert(!canPushCast); } } if (canPushCast) { DEBUG_DESTROY_NODE(tree); // Insert narrowing casts for op1 and op2. oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType); if (oper->AsOp()->gtOp2 != nullptr) { oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType); } // Clear the GT_MUL_64RSLT if it is set. if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT)) { oper->gtFlags &= ~GTF_MUL_64RSLT; } // The operation now produces a 32-bit result. oper->gtType = TYP_INT; // Remorph the new tree as the casts that we added may be folded away. 
return fgMorphTree(oper); } } } return nullptr; } #ifdef DEBUG const char* getNonStandardArgKindName(NonStandardArgKind kind) { switch (kind) { case NonStandardArgKind::None: return "None"; case NonStandardArgKind::PInvokeFrame: return "PInvokeFrame"; case NonStandardArgKind::PInvokeTarget: return "PInvokeTarget"; case NonStandardArgKind::PInvokeCookie: return "PInvokeCookie"; case NonStandardArgKind::WrapperDelegateCell: return "WrapperDelegateCell"; case NonStandardArgKind::ShiftLow: return "ShiftLow"; case NonStandardArgKind::ShiftHigh: return "ShiftHigh"; case NonStandardArgKind::FixedRetBuffer: return "FixedRetBuffer"; case NonStandardArgKind::VirtualStubCell: return "VirtualStubCell"; case NonStandardArgKind::R2RIndirectionCell: return "R2RIndirectionCell"; case NonStandardArgKind::ValidateIndirectCallTarget: return "ValidateIndirectCallTarget"; default: unreached(); } } void fgArgTabEntry::Dump() const { printf("fgArgTabEntry[arg %u", argNum); printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet())); printf(" %s", varTypeName(argType)); printf(" (%s)", passedByRef ? "By ref" : "By value"); if (GetRegNum() != REG_STK) { printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s"); for (unsigned i = 0; i < numRegs; i++) { printf(" %s", getRegName(regNums[i])); } } if (GetStackByteSize() > 0) { #if defined(DEBUG_ARG_SLOTS) printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset); #else printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset); #endif } printf(", byteAlignment=%u", m_byteAlignment); if (isLateArg()) { printf(", lateArgInx=%u", GetLateArgInx()); } if (IsSplit()) { printf(", isSplit"); } if (needTmp) { printf(", tmpNum=V%02u", tmpNum); } if (needPlace) { printf(", needPlace"); } if (isTmp) { printf(", isTmp"); } if (processed) { printf(", processed"); } if (IsHfaRegArg()) { printf(", isHfa(%s)", varTypeName(GetHfaType())); } if (isBackFilled) { printf(", isBackFilled"); } if (nonStandardArgKind != NonStandardArgKind::None) { printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind)); } if (isStruct) { printf(", isStruct"); } printf("]\n"); } #endif fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs) { compiler = comp; callTree = call; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = 0; #if defined(UNIX_X86_ABI) alignmentDone = false; stkSizeBytes = 0; padStkAlign = 0; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = 0; #endif argTableSize = numArgs; // the allocated table size hasRegArgs = false; hasStackArgs = false; argsComplete = false; argsSorted = false; needsTemps = false; if (argTableSize == 0) { argTable = nullptr; } else { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; } } /***************************************************************************** * * fgArgInfo Copy Constructor * * This method needs to act like a copy constructor for fgArgInfo. * The newCall needs to have its fgArgInfo initialized such that * we have newCall that is an exact copy of the oldCall. * We have to take care since the argument information * in the argTable contains pointers that must point to the * new arguments and not the old arguments. 
*/ fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall) { fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo; compiler = oldArgInfo->compiler; callTree = newCall; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = oldArgInfo->stkLevel; #if defined(UNIX_X86_ABI) alignmentDone = oldArgInfo->alignmentDone; stkSizeBytes = oldArgInfo->stkSizeBytes; padStkAlign = oldArgInfo->padStkAlign; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = oldArgInfo->outArgSize; #endif argTableSize = oldArgInfo->argTableSize; argsComplete = false; argTable = nullptr; assert(oldArgInfo->argsComplete); if (argTableSize > 0) { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; // Copy the old arg entries for (unsigned i = 0; i < argTableSize; i++) { argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]); } // The copied arg entries contain pointers to old uses; they need // to be updated to point to new uses. if (newCall->gtCallThisArg != nullptr) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldCall->gtCallThisArg) { argTable[i]->use = newCall->gtCallThisArg; break; } } } GenTreeCall::UseIterator newUse = newCall->Args().begin(); GenTreeCall::UseIterator newUseEnd = newCall->Args().end(); GenTreeCall::UseIterator oldUse = oldCall->Args().begin(); GenTreeCall::UseIterator oldUseEnd = oldCall->Args().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldUse.GetUse()) { argTable[i]->use = newUse.GetUse(); break; } } } newUse = newCall->LateArgs().begin(); newUseEnd = newCall->LateArgs().end(); oldUse = oldCall->LateArgs().begin(); oldUseEnd = oldCall->LateArgs().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->lateUse == oldUse.GetUse()) { argTable[i]->lateUse = newUse.GetUse(); break; } } } } argCount = oldArgInfo->argCount; DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;) nextStackByteOffset = oldArgInfo->nextStackByteOffset; hasRegArgs = oldArgInfo->hasRegArgs; hasStackArgs = oldArgInfo->hasStackArgs; argsComplete = true; argsSorted = true; } void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry) { assert(argCount < argTableSize); argTable[argCount] = curArgTabEntry; argCount++; } fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; // Any additional register numbers are set by the caller. // This is primarily because on ARM we don't yet know if it // will be split or if it is a double HFA, so the number of registers // may actually be less.
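// For example (editor's note): on ARM the final register count of a split argument, or
// of an HFA whose elements are doubles, is only known later, so the entry is created
// with a provisional numRegs and the caller fills in any additional register numbers.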
curArgTabEntry->setRegNum(0, regNum); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; curArgTabEntry->numRegs = numRegs; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->slotNum = 0; curArgTabEntry->numSlots = 0; #endif curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(0); hasRegArgs = true; if (argCount >= argTableSize) { fgArgTabEntry** oldTable = argTable; argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1]; memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*)); argTableSize++; } AddArg(curArgTabEntry); return curArgTabEntry; } #if defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr) { fgArgTabEntry* curArgTabEntry = AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg); assert(curArgTabEntry != nullptr); curArgTabEntry->isStruct = isStruct; // is this a struct arg curArgTabEntry->structIntRegs = structIntRegs; curArgTabEntry->structFloatRegs = structFloatRegs; INDEBUG(curArgTabEntry->checkIsStruct();) assert(numRegs <= 2); if (numRegs == 2) { curArgTabEntry->setRegNum(1, otherRegNum); } if (isStruct && structDescPtr != nullptr) { curArgTabEntry->structDesc.CopyFrom(*structDescPtr); } return curArgTabEntry; } #endif // defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum); curArgTabEntry->setRegNum(0, REG_STK); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->numSlots = numSlots; curArgTabEntry->slotNum = nextSlotNum; #endif curArgTabEntry->numRegs = 0; #if defined(UNIX_AMD64_ABI) curArgTabEntry->structIntRegs = 0; curArgTabEntry->structFloatRegs = 0; #endif // defined(UNIX_AMD64_ABI) curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = 
false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(nextStackByteOffset); hasStackArgs = true; AddArg(curArgTabEntry); DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) nextStackByteOffset += curArgTabEntry->GetByteSize(); return curArgTabEntry; } void fgArgInfo::RemorphReset() { DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // UpdateRegArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicating whether we are remorphing the call // // Assumptions: // This must have already been determined to be at least partially passed in registers. // void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); assert(curArgTabEntry->numRegs != 0); assert(curArgTabEntry->use->GetNode() == node); } //------------------------------------------------------------------------ // UpdateStkArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicating whether we are remorphing the call // // Assumptions: // This must have already been determined to be passed on the stack. // void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); noway_assert(curArgTabEntry->use != callTree->gtCallThisArg); assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit()); assert(curArgTabEntry->use->GetNode() == node); #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); assert(curArgTabEntry->slotNum == nextSlotNum); nextSlotNum += curArgTabEntry->numSlots; } #endif nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment()); assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset); nextStackByteOffset += curArgTabEntry->GetStackByteSize(); } void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots) { fgArgTabEntry* curArgTabEntry = nullptr; assert(argNum < argCount); for (unsigned inx = 0; inx < argCount; inx++) { curArgTabEntry = argTable[inx]; if (curArgTabEntry->argNum == argNum) { break; } } assert(numRegs > 0); assert(numSlots > 0); if (argsComplete) { assert(curArgTabEntry->IsSplit() == true); assert(curArgTabEntry->numRegs == numRegs); DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);) assert(hasStackArgs == true); } else { curArgTabEntry->SetSplit(true); curArgTabEntry->numRegs = numRegs; DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;) curArgTabEntry->SetByteOffset(0); hasStackArgs = true; } DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) // TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size. nextStackByteOffset += numSlots * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // EvalToTmp: Replace the node in the given fgArgTabEntry with a temp // // Arguments: // curArgTabEntry - the fgArgTabEntry for the argument // tmpNum - the varNum for the temp // newNode - the assignment of the argument value to the temp // // Notes: // Although the name of this method is EvalToTmp, it doesn't actually create // the temp or the copy. // void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode) { assert(curArgTabEntry->use != callTree->gtCallThisArg); assert(curArgTabEntry->use->GetNode() == newNode); assert(curArgTabEntry->GetNode() == newNode); curArgTabEntry->tmpNum = tmpNum; curArgTabEntry->isTmp = true; } void fgArgInfo::ArgsComplete() { bool hasStructRegArg = false; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); if (curArgTabEntry->GetRegNum() == REG_STK) { assert(hasStackArgs == true); #if !FEATURE_FIXED_OUT_ARGS // On x86 we use push instructions to pass arguments: // The non-register arguments are evaluated and pushed in order // and they are never evaluated into temps // continue; #endif } #if FEATURE_ARG_SPLIT else if (curArgTabEntry->IsSplit()) { hasStructRegArg = true; assert(hasStackArgs == true); } #endif // FEATURE_ARG_SPLIT else // we have a register argument, next we look for a struct type. { if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct)) { hasStructRegArg = true; } } /* If the argument tree contains an assignment (GTF_ASG) then the argument and every earlier argument (except constants) must be evaluated into temps since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a" -> when we see the second arg "a=5" we know the first two arguments "a, a=5" have to be evaluated into temps. For the case of an assignment, we only know that there exists some assignment somewhere in the tree. We don't know what is being assigned so we are very conservative here and assume that any local variable could have been assigned. */ if (argx->gtFlags & GTF_ASG) { // If this is not the only argument, or it's a copyblk, or it already evaluates the expression to // a tmp, then we need a temp in the late arg list. if ((argCount > 1) || argx->OperIsCopyBlkOp() #ifdef FEATURE_FIXED_OUT_ARGS || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property // that we only have late non-register args when that feature is on. #endif // FEATURE_FIXED_OUT_ARGS ) { curArgTabEntry->needTmp = true; needsTemps = true; } // For all previous arguments, unless they are a simple constant // we require that they be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); if (!prevArgTabEntry->GetNode()->IsInvariant()) { prevArgTabEntry->needTmp = true; needsTemps = true; } } } bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0); #if FEATURE_FIXED_OUT_ARGS // Like calls, if this argument has a tree that will do an inline throw // (a call to a jit helper), then we need to treat it like a call (but only // if there are/were any stack args). // This means unnesting, sorting, etc. Technically this is overly // conservative, but I want to avoid as much special-case debug-only code // as possible, so leveraging the GTF_CALL flag is the easiest. // if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode && (compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT)) { for (unsigned otherInx = 0; otherInx < argCount; otherInx++) { if (otherInx == curInx) { continue; } if (argTable[otherInx]->GetRegNum() == REG_STK) { treatLikeCall = true; break; } } } #endif // FEATURE_FIXED_OUT_ARGS /* If it contains a call (GTF_CALL) then itself and everything before the call with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT has to be kept in the right order since we will move the call to the first position) For calls we don't have to be quite as conservative as we are with an assignment since the call won't be modifying any non-address taken LclVars.
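E.g. for ArgTab "x, call()" where x is a global or an address-taken local, x must be evaluated into a temp, because after sorting the call is moved to the first evaluation position.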
*/ if (treatLikeCall) { if (argCount > 1) // If this is not the only argument { curArgTabEntry->needTmp = true; needsTemps = true; } else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL)) { // Spill all arguments that are floating point calls curArgTabEntry->needTmp = true; needsTemps = true; } // All previous arguments may need to be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); // For all previous arguments, if they have any GTF_ALL_EFFECT // we require that they be evaluated into a temp if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0) { prevArgTabEntry->needTmp = true; needsTemps = true; } #if FEATURE_FIXED_OUT_ARGS // Or, if they are stored into the FIXED_OUT_ARG area // we require that they be moved to the gtCallLateArgs // and replaced with a placeholder node else if (prevArgTabEntry->GetRegNum() == REG_STK) { prevArgTabEntry->needPlace = true; } #if FEATURE_ARG_SPLIT else if (prevArgTabEntry->IsSplit()) { prevArgTabEntry->needPlace = true; } #endif // FEATURE_ARG_SPLIT #endif } } #if FEATURE_MULTIREG_ARGS // For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST // with multiple indirections, so here we consider spilling it into a tmp LclVar. // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM bool isMultiRegArg = (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1); #else bool isMultiRegArg = (curArgTabEntry->numRegs > 1); #endif if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false)) { if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0)) { // Spill multireg struct arguments that have Assignments or Calls embedded in them curArgTabEntry->needTmp = true; needsTemps = true; } else { // We call gtPrepareCost to measure the cost of evaluating this tree compiler->gtPrepareCost(argx); if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX))) { // Spill multireg struct arguments that are expensive to evaluate twice curArgTabEntry->needTmp = true; needsTemps = true; } #if defined(FEATURE_SIMD) && defined(TARGET_ARM64) else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet())) { // SIMD types do not need the optimization below due to their sizes if (argx->OperIsSimdOrHWintrinsic() || (argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) && argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic())) { curArgTabEntry->needTmp = true; needsTemps = true; } } #endif #ifndef TARGET_ARM // TODO-Arm: This optimization is not implemented for ARM32 // so we skip this for ARM32 until it is ported to use RyuJIT backend // else if (argx->OperGet() == GT_OBJ) { GenTreeObj* argObj = argx->AsObj(); unsigned structSize = argObj->GetLayout()->GetSize(); switch (structSize) { case 3: case 5: case 6: case 7: // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes // if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar? { // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes // For now we use a a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp. 
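// (A wider 4- or 8-byte load is only known to be safe for stack-allocated locals; from an arbitrary address it could read past the end of the struct.)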
// curArgTabEntry->needTmp = true; needsTemps = true; } break; case 11: case 13: case 14: case 15: // Spill any GT_OBJ multireg structs that are difficult to extract // // When we have a GT_OBJ of a struct with the above sizes we would need // to use 3 or 4 load instructions to load the exact size of this struct. // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp. // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing // the argument. // curArgTabEntry->needTmp = true; needsTemps = true; break; default: break; } } #endif // !TARGET_ARM } } #endif // FEATURE_MULTIREG_ARGS } // We only care because we can't spill structs and qmarks involve a lot of spilling, but // if we don't have qmarks, then it doesn't matter. // So check for Qmarks globally once here, instead of inside the loop. // const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed); #if FEATURE_FIXED_OUT_ARGS // For Arm/x64 we only care because we can't reorder a register // argument that uses GT_LCLHEAP. This is an optimization to // save a check inside the below loop. // const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed); #else const bool hasStackArgsWeCareAbout = hasStackArgs; #endif // FEATURE_FIXED_OUT_ARGS // If we have any stack args we have to force the evaluation // of any arguments passed in registers that might throw an exception // // Technically we are only required to handle the following two cases: // a GT_IND with GTF_IND_RNGCHK (only on x86) or // a GT_LCLHEAP node that allocates stuff on the stack // if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout) { for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); // Examine the register args that are currently not marked needTmp // if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK)) { if (hasStackArgsWeCareAbout) { #if !FEATURE_FIXED_OUT_ARGS // On x86 we previously recorded a stack depth of zero when // morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag // Thus we cannot reorder the argument after any stack based argument // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to // check for it explicitly.) // if (argx->gtFlags & GTF_EXCEPT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } #else // For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP // if (argx->gtFlags & GTF_EXCEPT) { assert(compiler->compLocallocUsed); // Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } #endif } if (hasStructRegArgWeCareAbout) { // Returns true if a GT_QMARK node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } } } } // When CFG is enabled and this is a delegate call or vtable call we must // compute the call target before all late args. However this will // effectively null-check 'this', which should happen only after all // arguments are evaluated. Thus we must evaluate all args with side // effects to a temp.
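// For example (illustrative only): for "del(SideEffect())" the CFG expansion reads the delegate's target (null-checking 'this') before the late args run, so SideEffect() must be captured in a temp to preserve its original ordering.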
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke())) { // Always evaluate 'this' to temp. argTable[0]->needTmp = true; needsTemps = true; for (unsigned curInx = 1; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; GenTree* arg = curArgTabEntry->GetNode(); if ((arg->gtFlags & GTF_ALL_EFFECT) != 0) { curArgTabEntry->needTmp = true; needsTemps = true; } } } argsComplete = true; } void fgArgInfo::SortArgs() { assert(argsComplete == true); #ifdef DEBUG if (compiler->verbose) { printf("\nSorting the arguments:\n"); } #endif /* Shuffle the arguments around before we build the gtCallLateArgs list. The idea is to move all "simple" arguments like constants and local vars to the end of the table, and move the complex arguments towards the beginning of the table. This will help prevent registers from being spilled by allowing us to evaluate the more complex arguments before the simpler arguments. The argTable ends up looking like: +------------------------------------+ <--- argTable[argCount - 1] | constants | +------------------------------------+ | local var / local field | +------------------------------------+ | remaining arguments sorted by cost | +------------------------------------+ | temps (argTable[].needTmp = true) | +------------------------------------+ | args with calls (GTF_CALL) | +------------------------------------+ <--- argTable[0] */ /* Set the beginning and end for the new argument table */ unsigned curInx; int regCount = 0; unsigned begTab = 0; unsigned endTab = argCount - 1; unsigned argsRemaining = argCount; // First take care of arguments that are constants. // [We use a backward iterator pattern] // curInx = argCount; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { regCount++; } assert(curArgTabEntry->lateUse == nullptr); // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put constants at the end of the table // if (argx->gtOper == GT_CNS_INT) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > 0); if (argsRemaining > 0) { // Next take care of arguments that are calls. // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put calls at the beginning of the table // if (argx->gtFlags & GTF_CALL) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care arguments that are temps. // These temps come before the arguments that are // ordinary local vars or local fields // since this will give them a better chance to become // enregistered into their actual argument register. 
// [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { if (curArgTabEntry->needTmp) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of local var and local field arguments. // These are moved towards the end of the argument evaluation. // [We use a backward iterator pattern] // curInx = endTab + 1; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD)) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > begTab); } // Finally, take care of all the remaining arguments. // Note that we fill in one arg at a time using a while loop. bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop while (argsRemaining > 0) { /* Find the most expensive arg remaining and evaluate it next */ fgArgTabEntry* expensiveArgTabEntry = nullptr; unsigned expensiveArg = UINT_MAX; unsigned expensiveArgCost = 0; // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // We should have already handled these kinds of args assert(argx->gtOper != GT_LCL_VAR); assert(argx->gtOper != GT_LCL_FLD); assert(argx->gtOper != GT_CNS_INT); // This arg should either have no persistent side effects or be the last one in our table // assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1))); if (argsRemaining == 1) { // This is the last arg to place expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; assert(begTab == endTab); break; } else { if (!costsPrepared) { /* We call gtPrepareCost to measure the cost of evaluating this tree */ compiler->gtPrepareCost(argx); } if (argx->GetCostEx() > expensiveArgCost) { // Remember this arg as the most expensive one that we have yet seen expensiveArgCost = argx->GetCostEx(); expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; } } } } noway_assert(expensiveArg != UINT_MAX); // put the most expensive arg towards the beginning of the table expensiveArgTabEntry->processed = true; // place expensiveArgTabEntry at the begTab position by performing a swap // if (expensiveArg != begTab) { argTable[expensiveArg] = argTable[begTab]; argTable[begTab] = expensiveArgTabEntry; } begTab++; argsRemaining--; costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop } // The table should now be completely filled and thus begTab should now be adjacent to endTab // and regArgsRemaining should be zero assert(begTab == (endTab + 1)); assert(argsRemaining == 0); argsSorted = true; } #ifdef DEBUG void fgArgInfo::Dump(Compiler* compiler) const { for (unsigned curInx = 0; curInx 
< ArgCount(); curInx++) { fgArgTabEntry* curArgEntry = ArgTable()[curInx]; curArgEntry->Dump(); } } #endif //------------------------------------------------------------------------------ // fgMakeTmpArgNode : This function creates a tmp var only if needed. // We need this to be done in order to enforce ordering // of the evaluation of arguments. // // Arguments: // curArgTabEntry // // Return Value: // the newly created temp var tree. GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) { unsigned tmpVarNum = curArgTabEntry->tmpNum; LclVarDsc* varDsc = lvaGetDesc(tmpVarNum); assert(varDsc->lvIsTemp); var_types type = varDsc->TypeGet(); // Create a copy of the temp to go into the late argument list GenTree* arg = gtNewLclvNode(tmpVarNum, type); GenTree* addrNode = nullptr; if (varTypeIsStruct(type)) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. // Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type. bool passedAsPrimitive = false; if (curArgTabEntry->TryPassAsPrimitive()) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg()); if (structBaseType != TYP_UNKNOWN) { passedAsPrimitive = true; #if defined(UNIX_AMD64_ABI) // TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry, // and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take // a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again. // if (genIsValidFloatReg(curArgTabEntry->GetRegNum())) { if (structBaseType == TYP_INT) { structBaseType = TYP_FLOAT; } else { assert(structBaseType == TYP_LONG); structBaseType = TYP_DOUBLE; } } #endif type = structBaseType; } } // If it is passed in registers, don't get the address of the var. Make it a // field instead. It will be loaded in registers with putarg_reg tree in lower. if (passedAsPrimitive) { arg->ChangeOper(GT_LCL_FLD); arg->gtType = type; lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); } else { var_types addrType = TYP_BYREF; arg = gtNewOperNode(GT_ADDR, addrType, arg); lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); addrNode = arg; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { // We will create a GT_OBJ for the argument below. // This will be passed by value in two registers. assert(addrNode != nullptr); // Create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); } #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); #endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } #else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg); // Get a new Obj node temp to use it as a call argument. // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. 
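// (Here the source is the address of a local temp we just created, so the exception flag is not expected to be set.)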
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode); #endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) } // (varTypeIsStruct(type)) if (addrNode != nullptr) { assert(addrNode->gtOper == GT_ADDR); // the child of a GT_ADDR is required to have this flag set addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE; } return arg; } //------------------------------------------------------------------------------ // EvalArgsToTemps : Create temp assignments and populate the LateArgs list. void fgArgInfo::EvalArgsToTemps() { assert(argsSorted); unsigned regArgInx = 0; // Now go through the argument table and perform the necessary evaluation into temps GenTreeCall::Use* tmpRegArgNext = nullptr; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry->lateUse == nullptr); GenTree* argx = curArgTabEntry->GetNode(); GenTree* setupArg = nullptr; GenTree* defArg; #if !FEATURE_FIXED_OUT_ARGS // Only ever set for FEATURE_FIXED_OUT_ARGS assert(curArgTabEntry->needPlace == false); // On x86 and other archs that use push instructions to pass arguments: // Only the register arguments need to be replaced with placeholder nodes. // Stacked arguments are evaluated and pushed (or stored into the stack) in order. // if (curArgTabEntry->GetRegNum() == REG_STK) continue; #endif if (curArgTabEntry->needTmp) { if (curArgTabEntry->isTmp) { // Create a copy of the temp to go into the late argument list defArg = compiler->fgMakeTmpArgNode(curArgTabEntry); // mark the original node as a late argument argx->gtFlags |= GTF_LATE_ARG; } else { // Create a temp assignment for the argument // Put the temp in the gtCallLateArgs list CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { printf("Argument with 'side effect'...\n"); compiler->gtDispTree(argx); } #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) noway_assert(argx->gtType != TYP_STRUCT); #endif unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect")); if (argx->gtOper == GT_MKREFANY) { // For GT_MKREFANY, typically the actual struct copying does // not have any side-effects and can be delayed. So instead // of using a temp for the whole struct, we can just use a temp // for operand that that has a side-effect GenTree* operand; if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp1; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp2; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. 
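// (Only the potentially side-effecting operand is captured in the temp; the side-effect-free operand is left in place.)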
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } } if (setupArg != nullptr) { // Now keep the mkrefany for the late argument list defArg = argx; // Clear the side-effect flags because now both op1 and op2 have no side-effects defArg->gtFlags &= ~GTF_ALL_EFFECT; } else { setupArg = compiler->gtNewTempAssign(tmpVarNum, argx); LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); var_types scalarType = TYP_UNKNOWN; if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); #if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM architectures. // CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum); unsigned structSize = varDsc->lvExactSize; scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } #endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => // 8) if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType)) { // Create a GT_LCL_FLD using the wider type to go to the late argument list defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0); } else { // Create a copy of the temp to go to the late argument list defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType); } curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; #ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). // Too bad we're not that smart for these intermediate temps... if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1)) { regNumber argReg = curArgTabEntry->GetRegNum(); regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum()); for (unsigned i = 1; i < curArgTabEntry->numRegs; i++) { argReg = genRegArgNext(argReg); allUsedRegs |= genRegMask(argReg); } } #endif // TARGET_ARM } /* mark the assignment as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { printf("\n Evaluate to a temp:\n"); compiler->gtDispTree(setupArg); } #endif } } else // curArgTabEntry->needTmp == false { // On x86 - // Only register args are replaced with placeholder nodes // and the stack based arguments are evaluated and pushed in order. // // On Arm/x64 - When needTmp is false and needPlace is false, // the non-register arguments are evaluated and stored in order. // When needPlace is true we have a nested call that comes after // this argument so we have to replace it in the gtCallArgs list // (the initial argument evaluation list) with a placeholder. // if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false)) { continue; } /* No temp needed - move the whole node to the gtCallLateArgs list */ /* The argument is deferred and put in the late argument list */ defArg = argx; // Create a placeholder node to put in its place in gtCallLateArgs. // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference. 
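// Hence a TYP_STRUCT-typed node should never reach this point on Windows x64.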
noway_assert(argx->gtType != TYP_STRUCT); #else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { clsHnd = compiler->gtGetStructHandleIfPresent(defArg); noway_assert(clsHnd != NO_CLASS_HANDLE); } #endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); /* mark the placeholder node as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { if (curArgTabEntry->GetRegNum() == REG_STK) { printf("Deferred stack argument :\n"); } else { printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum())); } compiler->gtDispTree(argx); printf("Replaced with placeholder node:\n"); compiler->gtDispTree(setupArg); } #endif } if (setupArg != nullptr) { noway_assert(curArgTabEntry->use->GetNode() == argx); curArgTabEntry->use->SetNode(setupArg); } /* deferred arg goes into the late argument list */ if (tmpRegArgNext == nullptr) { tmpRegArgNext = compiler->gtNewCallArgs(defArg); callTree->AsCall()->gtCallLateArgs = tmpRegArgNext; } else { noway_assert(tmpRegArgNext->GetNode() != nullptr); tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg)); tmpRegArgNext = tmpRegArgNext->GetNext(); } curArgTabEntry->lateUse = tmpRegArgNext; curArgTabEntry->SetLateArgInx(regArgInx++); } #ifdef DEBUG if (compiler->verbose) { printf("\nShuffled argument table: "); for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { printf("%s ", getRegName(curArgTabEntry->GetRegNum())); } } printf("\n"); } #endif } //------------------------------------------------------------------------------ // fgMakeMultiUse : If the node is an unaliased local or constant clone it, // otherwise insert a comma form temp // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // // Notes: // Caller must ensure that if the node is an unaliased local, the second use this // creates will be evaluated before the local can be reassigned. // // Can be safely called in morph preorder, before GTF_GLOB_REF is reliable. // GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; if (tree->IsInvariant()) { return gtClone(tree); } else if (tree->IsLocal()) { // Can't rely on GTF_GLOB_REF here. // if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed()) { return gtClone(tree); } } return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, // and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // structType - value type handle if the temp created is of TYP_STRUCT. 
// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) { GenTree* subTree = *ppTree; unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable")); if (varTypeIsStruct(subTree)) { assert(structType != nullptr); lvaSetStruct(lclNum, structType, false); } // If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for // setting type of lcl vars created. GenTree* asg = gtNewTempAssign(lclNum, subTree); GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); *ppTree = comma; return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); } //------------------------------------------------------------------------ // fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg // // Arguments: // callNode - the call for which we are generating the fgArgInfo // // Return Value: // None // // Notes: // This method is idempotent in that it checks whether the fgArgInfo has already been // constructed, and just returns. // This method only computes the arg table and arg entries for the call (the fgArgInfo), // and makes no modification of the args themselves. // // The IR for the call args can change for calls with non-standard arguments: some non-standard // arguments add new call argument IR nodes. // void Compiler::fgInitArgInfo(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; unsigned argIndex = 0; unsigned intArgRegNum = 0; unsigned fltArgRegNum = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); #ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; #endif // TARGET_ARM #if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number #endif if (call->fgArgInfo != nullptr) { // We've already initialized and set the fgArgInfo. return; } JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing // arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the // non-standard arguments into the argument list, below. class NonStandardArgs { struct NonStandardArg { GenTree* node; // The tree node representing this non-standard argument. 
// Note that this must be updated if the tree node changes due to morphing! regNumber reg; // The register to be assigned to this non-standard argument. NonStandardArgKind kind; // The kind of the non-standard arg }; ArrayStack<NonStandardArg> args; public: NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments { } //----------------------------------------------------------------------------- // Add: add a non-standard argument to the table of non-standard arguments // // Arguments: // node - a GenTree node that has a non-standard argument. // reg - the register to assign to this node. // kind - the kind of the non-standard arg. // // Return Value: // None. // void Add(GenTree* node, regNumber reg, NonStandardArgKind kind) { NonStandardArg nsa = {node, reg, kind}; args.Push(nsa); } //----------------------------------------------------------------------------- // Find: Look for a GenTree* in the set of non-standard args. // // Arguments: // node - a GenTree node to look for // // Return Value: // The index of the non-standard argument (a non-negative, unique, stable number). // If the node is not a non-standard argument, return -1. // int Find(GenTree* node) { for (int i = 0; i < args.Height(); i++) { if (node == args.Top(i).node) { return i; } } return -1; } //----------------------------------------------------------------------------- // Find: Look for a GenTree node in the non-standard arguments set. If found, // set the register to use for the node. // // Arguments: // node - a GenTree node to look for // pReg - an OUT argument. *pReg is set to the non-standard register to use if // 'node' is found in the non-standard argument set. // pKind - an OUT argument. *pKind is set to the kind of the non-standard arg. // // Return Value: // 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set. // 'false' otherwise (in this case, *pReg and *pKind are unmodified). // bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind) { for (int i = 0; i < args.Height(); i++) { NonStandardArg& nsa = args.TopRef(i); if (node == nsa.node) { *pReg = nsa.reg; *pKind = nsa.kind; return true; } } return false; } //----------------------------------------------------------------------------- // Replace: Replace the non-standard argument node at a given index. This is done when // the original node was replaced via morphing, but we need to continue to assign a // particular non-standard arg to it. // // Arguments: // index - the index of the non-standard arg. It must exist. // node - the new GenTree node. // // Return Value: // None. // void Replace(int index, GenTree* node) { args.TopRef(index).node = node; } } nonStandardArgs(getAllocator(CMK_ArrayStack)); // Count of args. On first morph, this is counted before we've filled in the arg table. // On remorph, we grab it from the arg table. unsigned numArgs = 0; // First we need to count the args if (call->gtCallThisArg != nullptr) { numArgs++; } for (GenTreeCall::Use& use : call->Args()) { numArgs++; } // Insert or mark non-standard args. These are either outside the normal calling convention, or // argument registers that don't follow the normal progression of argument registers in the calling // convention (such as for the ARM64 fixed return buffer argument x8). // // *********** NOTE ************* // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments // in the implementation of fast tail call.
// *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers has a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame); } #endif // defined(TARGET_X86) || defined(TARGET_ARM) #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4 // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub) // to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details. else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { GenTree* arg = call->gtCallThisArg->GetNode(); if (arg->OperIsLocal()) { arg = gtClone(arg, true); } else { GenTree* tmp = fgInsertCommaFormTemp(&arg); call->gtCallThisArg->SetNode(arg); call->gtFlags |= GTF_ASG; arg = tmp; } noway_assert(arg != nullptr); GenTree* newArg = new (this, GT_ADDR) GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell); // Append newArg as the last arg GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(newArg); numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell); } #endif // defined(TARGET_ARM) #if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow); args = args->GetNext(); GenTree* arg2 = args->GetNode(); assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh); } #else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it. // // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention. // That convention doesn't use the fixed return buffer register. // CLANG_FORMAT_COMMENT_ANCHOR; if (call->HasFixedRetBufArg()) { args = call->gtCallArgs; assert(args != nullptr); argx = call->gtCallArgs->GetNode(); // We don't increment numArgs here, since we already counted this argument above. 
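// (On ARM64 the fixed return buffer register is x8, which sits outside the normal x0-x7 argument register sequence.)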
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer); } // We are allowed to have a Fixed Return Buffer argument combined // with any of the remaining non-standard arguments // CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub()) { if (!call->IsTailCallViaJitHelper()) { GenTree* stubAddrArg = fgGetStubAddrArg(call); // And push the stub address onto the list of arguments call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell); } else { // If it is a VSD call getting dispatched via tail call helper, // fgMorphTailCallViaJitHelper() would materialize stub addr as an additional // parameter added to the original arg list and hence no need to // add as a non-standard arg. } } else #endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); GenTree* arg = call->gtCallCookie; noway_assert(arg != nullptr); call->gtCallCookie = nullptr; // All architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie); numArgs++; // put destination into R10/EAX arg = gtClone(call->gtCallAddr, true); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget); // finally change this call to a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } #if defined(FEATURE_READYTORUN) // For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. // For x64/x86 we use return address to get the indirection cell by disassembling the call site. // That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch. // Note that we call this before we know if something will be a fast tailcall or not. // That's ok; after making something a tailcall, we will invalidate this information // and reconstruct it if necessary. The tailcalling decision does not change since // this is a non-standard arg in a register. bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke(); #if defined(TARGET_XARCH) needsIndirectionCell &= call->IsFastTailCall(); #endif if (needsIndirectionCell) { assert(call->gtEntryPoint.addr != nullptr); size_t addrValue = (size_t)call->gtEntryPoint.addr; GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR); #ifdef DEBUG indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); #ifdef TARGET_ARM // Issue #xxxx : Don't attempt to CSE this constant on ARM32 // // This constant has specific register requirements, and LSRA doesn't currently correctly // handle them when the value is in a CSE'd local. indirectCellAddress->SetDoNotCSE(); #endif // TARGET_ARM // Push the stub address onto the list of arguments. 
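// (For R2R this is really the indirection cell address, constrained above to REG_R2R_INDIRECT_PARAM.)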
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); numArgs++; nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(), NonStandardArgKind::R2RIndirectionCell); } #endif if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { assert(call->gtCallArgs != nullptr); GenTreeCall::Use* args = call->gtCallArgs; GenTree* tar = args->GetNode(); nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget); } // Allocate the fgArgInfo for the call node; // call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs); // Add the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); assert(argIndex == 0); assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT); assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL)); const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum); const unsigned numRegs = 1; const unsigned byteSize = TARGET_POINTER_SIZE; const unsigned byteAlignment = TARGET_POINTER_SIZE; const bool isStruct = false; const bool isFloatHfa = false; // This is a register argument - put it in the table. call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); intArgRegNum++; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument fltArgRegNum++; #endif // WINDOWS_AMD64_ABI argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } #ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) { noway_assert(intArgRegNum < MAX_REG_ARG); // No more register arguments for varargs (CALL_POP_ARGS) maxRegArgs = intArgRegNum; // Add in the ret buff arg if (callHasRetBuffArg) maxRegArgs++; } #endif // UNIX_X86_ABI if (call->IsUnmanaged()) { noway_assert(intArgRegNum == 0); if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL || call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF || call->gtCallArgs->GetNode()->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice) maxRegArgs = 1; } else { maxRegArgs = 0; } #ifdef UNIX_X86_ABI // Add in the ret buff arg if (callHasRetBuffArg && call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments. maxRegArgs++; #endif } #endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially // section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can // appear in a lower-numbered register than floating point argument N. That is, argument // register allocation is not strictly increasing. To support this, we need to keep track of unused // floating-point argument registers that we can back-fill. 
We only support 4-byte float and // 8-byte double types, and one to four element HFAs composed of these types. With this, we will // only back-fill single registers, since there is no way with these types to create // an alignment hole greater than one register. However, there can be up to 3 back-fill slots // available (with 16 FP argument registers). Consider this code: // // struct HFA { float x, y, z; }; // a three element HFA // void bar(float a1, // passed in f0 // double a2, // passed in f2/f3; skip f1 for alignment // HFA a3, // passed in f4/f5/f6 // double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot // HFA a5, // passed in f10/f11/f12 // double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill // // slots // float a7, // passed in f1 (back-filled) // float a8, // passed in f7 (back-filled) // float a9, // passed in f13 (back-filled) // float a10) // passed on the stack in [OutArg+0] // // Note that if we ever support FP types with larger alignment requirements, then there could // be more than single register back-fills. // // Once we assign a floating-point register to the stack, they all must be on the stack. // See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling // continues only so long as no VFP CPRC has been allocated to a slot on the stack." // We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack // and prevent any additional floating-point arguments from going in registers. bool anyFloatStackArgs = false; #endif // TARGET_ARM #ifdef UNIX_AMD64_ABI SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif // UNIX_AMD64_ABI #if defined(DEBUG) // Check that we have valid information about the call's argument types. // For example: // load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte)); // load int; call(byte) -> CALL(PUTARG_TYPE int (IND int)); // etc. if (call->callSig != nullptr) { CORINFO_SIG_INFO* sig = call->callSig; const unsigned sigArgsCount = sig->numArgs; GenTreeCall::Use* nodeArgs = call->gtCallArgs; // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie // etc. unsigned nodeArgsCount = 0; call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult { nodeArgsCount++; return GenTree::VisitResult::Continue; }); if (call->gtCallThisArg != nullptr) { // Handle the most common argument not in the `sig->numArgs`, // so the following check works on more methods.
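// (Other extra arguments, such as runtime lookups or cookies, leave nodeArgsCount greater than sigArgsCount, in which case the detailed per-argument type check below is simply skipped.)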
nodeArgsCount--; } assert(nodeArgsCount >= sigArgsCount); if ((nodeArgsCount == sigArgsCount) && ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) { CORINFO_ARG_LIST_HANDLE sigArg = sig->args; for (unsigned i = 0; i < sig->numArgs; ++i) { CORINFO_CLASS_HANDLE argClass; const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); const var_types sigType = JITtype2varType(corType); assert(nodeArgs != nullptr); const GenTree* nodeArg = nodeArgs->GetNode(); assert(nodeArg != nullptr); const var_types nodeType = nodeArg->TypeGet(); assert((nodeType == sigType) || varTypeIsStruct(sigType) || genTypeSize(nodeType) == genTypeSize(sigType)); sigArg = info.compCompHnd->getArgNext(sigArg); nodeArgs = nodeArgs->GetNext(); } assert(nodeArgs == nullptr); } } #endif // DEBUG for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { argx = args->GetNode()->gtSkipPutArgType(); // Change the node to TYP_I_IMPL so we don't report GC info // NOTE: We deferred this from the importer because of the inliner. if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // We should never have any ArgPlaceHolder nodes at this point. assert(!argx->IsArgPlaceHolderNode()); // Setup any HFA information about 'argx' bool isHfaArg = false; var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; unsigned size = 0; unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { // Make sure for vararg methods isHfaArg is not true. isHfaArg = callIsVararg ? false : isHfaArg; } #endif // defined(TARGET_ARM64) if (isHfaArg) { isHfaArg = true; hfaSlots = GetHfaCount(argx); // If we have a HFA struct it's possible we transition from a method that originally // only had integer types to now start having FP types. We have to communicate this // through this flag since LSRA later on will use this flag to determine whether // or not to track the FP register set. // compFloatingPointUsed = true; } } const bool isFloatHfa = (hfaType == TYP_FLOAT); #ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); // We don't use the "size" return value from InferOpSizeAlign(). 
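// Only the alignment matters here: an 8-byte-aligned argument may force skipping an odd-numbered register below.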
codeGen->InferOpSizeAlign(argx, &argAlignBytes); argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE); if (argAlignBytes == 2 * TARGET_POINTER_SIZE) { if (passUsingFloatRegs) { if (fltArgRegNum % 2 == 1) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); fltArgRegNum++; } } else if (passUsingIntRegs) { if (intArgRegNum % 2 == 1) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); intArgRegNum++; } } #if defined(DEBUG) if (argSlots % 2 == 1) { argSlots++; } #endif } #elif defined(TARGET_ARM64) assert(!callIsVararg || !isHfaArg); passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)); #elif defined(TARGET_AMD64) passUsingFloatRegs = varTypeIsFloating(argx); #elif defined(TARGET_X86) passUsingFloatRegs = false; #else #error Unsupported or unset target architecture #endif // TARGET* bool isBackFilled = false; unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use var_types structBaseType = TYP_STRUCT; unsigned structSize = 0; bool passStructByRef = false; bool isStructArg; GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */); // // Figure out the size of the argument. This is either in number of registers, or number of // TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and // the stack. // isStructArg = varTypeIsStruct(argx); CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE; if (isStructArg) { objClass = gtGetStructHandle(argx); if (argx->TypeGet() == TYP_STRUCT) { // For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY switch (actualArg->OperGet()) { case GT_OBJ: structSize = actualArg->AsObj()->GetLayout()->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); break; case GT_LCL_VAR: structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize; break; case GT_MKREFANY: structSize = info.compCompHnd->getClassSize(objClass); break; default: BADCODE("illegal argument tree in fgInitArgInfo"); break; } } else { structSize = genTypeSize(argx); assert(structSize == info.compCompHnd->getClassSize(objClass)); } } #if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI if (!isStructArg) { size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } else { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc); } #else // !UNIX_AMD64_ABI size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot' if (!isStructArg) { byteSize = genTypeSize(argx); } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if (isStructArg) { if (isHfaArg) { // HFA structs are passed by value in multiple registers. // The "size" in registers may differ the size in pointer-sized units. CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx); size = GetHfaCount(structHnd); byteSize = info.compCompHnd->getClassSize(structHnd); } else { // Structs are either passed in 1 or 2 (64-bit) slots. // Structs that are the size of 2 pointers are passed by value in multiple registers, // if sufficient registers are available. // Structs that are larger than 2 pointers (except for HFAs) are passed by // reference (to a copy) size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; if (size > 2) { size = 1; } } // Note that there are some additional rules for multireg structs. 
// (i.e they cannot be split between registers and the stack) } else { size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot' byteSize = genTypeSize(argx); } #elif defined(TARGET_ARM) || defined(TARGET_X86) if (isStructArg) { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; byteSize = structSize; } else { // The typical case. // Long/double type argument(s) will be modified as needed in Lowering. size = genTypeStSz(argx->gtType); byteSize = genTypeSize(argx); } #else #error Unsupported or unset target architecture #endif // TARGET_XXX if (isStructArg) { assert(argx == args->GetNode()); assert(structSize != 0); structPassingKind howToPassStruct; structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize); passStructByRef = (howToPassStruct == SPK_ByReference); if (howToPassStruct == SPK_ByReference) { byteSize = TARGET_POINTER_SIZE; } else { byteSize = structSize; } if (howToPassStruct == SPK_PrimitiveType) { #ifdef TARGET_ARM // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct, // or for a struct of two floats. This causes the struct to be address-taken. if (structBaseType == TYP_DOUBLE) { size = 2; } else #endif // TARGET_ARM { size = 1; } } else if (passStructByRef) { size = 1; } } const var_types argType = args->GetNode()->TypeGet(); if (args->GetNode()->OperIs(GT_PUTARG_TYPE)) { byteSize = genTypeSize(argType); } // The 'size' value has now must have been set. (the original value of zero is an invalid value) assert(size != 0); assert(byteSize != 0); if (compMacOsArm64Abi()) { // Arm64 Apple has a special ABI for passing small size arguments on stack, // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc. // It means passing 8 1-byte arguments on stack can take as small as 8 bytes. argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa); } // // Figure out if the argument will be passed in a register. // bool isRegArg = false; NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None; regNumber nonStdRegNum = REG_NA; if (isRegParamType(genActualType(argx->TypeGet())) #ifdef UNIX_AMD64_ABI && (!isStructArg || structDesc.passedInRegisters) #elif defined(TARGET_X86) || (isStructArg && isTrivialPointerSizedStruct(objClass)) #endif ) { #ifdef TARGET_ARM if (passUsingFloatRegs) { // First, see if it can be back-filled if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet) (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot? (size == 1)) // The size to back-fill is one float register { // Back-fill the register. isBackFilled = true; regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask); fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask)); assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG); } // Does the entire float, double, or HFA fit in the FP arg registers? // Check if the last register needed is still in the argument register range. isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG; if (!isRegArg) { anyFloatStackArgs = true; } } else { isRegArg = intArgRegNum < MAX_REG_ARG; } #elif defined(TARGET_ARM64) if (passUsingFloatRegs) { // Check if the last register needed is still in the fp argument register range. 
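// Illustrative example: an HFA of three floats with nextFltArgRegNum == 6 would need registers 6..8; since 6 + (3 - 1) == 8 is not below MAX_FLOAT_REG_ARG (8 on arm64), it falls back to the stack.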
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG; // Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers? if (isHfaArg && !isRegArg) { // recompute the 'size' so that it represent the number of stack slots rather than the number of // registers // unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE); size = roundupSize / TARGET_POINTER_SIZE; // We also must update fltArgRegNum so that we no longer try to // allocate any new floating point registers for args // This prevents us from backfilling a subsequent arg into d7 // fltArgRegNum = MAX_FLOAT_REG_ARG; } } else { // Check if the last register needed is still in the int argument register range. isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; // Did we run out of registers when we had a 16-byte struct (size===2) ? // (i.e we only have one register remaining but we needed two registers to pass this arg) // This prevents us from backfilling a subsequent arg into x7 // if (!isRegArg && (size > 1)) { // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. if (TargetOS::IsWindows && callIsVararg) { // Override the decision and force a split. isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args // intArgRegNum = maxRegArgs; } } } #else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) // Here a struct can be passed in register following the classifications of its members and size. // Now make sure there are actually enough registers to do so. if (isStructArg) { unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { structIntRegs++; } else if (structDesc.IsSseSlot(i)) { structFloatRegs++; } } isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) && ((intArgRegNum + structIntRegs) <= MAX_REG_ARG); } else { if (passUsingFloatRegs) { isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG; } else { isRegArg = intArgRegNum < MAX_REG_ARG; } } #else // !defined(UNIX_AMD64_ABI) isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; #endif // !defined(UNIX_AMD64_ABI) #endif // TARGET_ARM } else { isRegArg = false; } // If there are nonstandard args (outside the calling convention) they were inserted above // and noted them in a table so we can recognize them here and build their argInfo. // // They should not affect the placement of any other args or stack space required. // Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls. bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind); if (isNonStandard) { isRegArg = (nonStdRegNum != REG_STK); } else if (call->IsTailCallViaJitHelper()) { // We have already (before calling fgMorphArgs()) appended the 4 special args // required by the x86 tailcall helper. These args are required to go on the // stack. Force them to the stack here. assert(numArgs >= 4); if (argIndex >= numArgs - 4) { isRegArg = false; } } // Now we know if the argument goes in registers or not and how big it is. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all // subsequent HFA/float/double arguments go on the stack. 
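    // For example (a hypothetical signature, for illustration only): if a double argument
    // goes to the stack because no suitable FP register pair remains, a later float argument
    // may not back-fill a free FP register; the ARM ABI requires it to go on the stack as
    // well, which is why every remaining FP argument register is marked as skipped below.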
if (!isRegArg && passUsingFloatRegs) { for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); } } // If we think we're going to split a struct between integer registers and the stack, check to // see if we've already assigned a floating-point arg to the stack. if (isRegArg && // We decided above to use a register for the argument !passUsingFloatRegs && // We're using integer registers (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack anyFloatStackArgs) // We've already used the stack for a floating-point argument { isRegArg = false; // Change our mind; don't pass this struct partially in registers // Skip the rest of the integer argument registers for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } #endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; if (isRegArg) { regNumber nextRegNum = REG_STK; #if defined(UNIX_AMD64_ABI) regNumber nextOtherRegNum = REG_STK; unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; #endif // defined(UNIX_AMD64_ABI) if (isNonStandard) { nextRegNum = nonStdRegNum; } #if defined(UNIX_AMD64_ABI) else if (isStructArg && structDesc.passedInRegisters) { // It is a struct passed in registers. Assign the next available register. assert((structDesc.eightByteCount <= 2) && "Too many eightbytes."); regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum}; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { *nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs); ++structIntRegs; } else if (structDesc.IsSseSlot(i)) { *nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs); ++structFloatRegs; } } } #endif // defined(UNIX_AMD64_ABI) else { // fill in or update the argInfo table nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum); } #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif #endif // This is a register argument - put it in the table newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum) UNIX_AMD64_ABI_ONLY_ARG(structIntRegs) UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs) UNIX_AMD64_ABI_ONLY_ARG(&structDesc)); newArgEntry->SetIsBackFilled(isBackFilled); // Set up the next intArgRegNum and fltArgRegNum values. 
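            // On Windows x64, integer and floating-point argument registers are paired by
            // position: e.g. for an (int, double) signature (ignoring any 'this' or return
            // buffer arguments) the int goes in RCX and the double in XMM1, leaving RDX and
            // XMM0 unused. The min(...) updates below model this by advancing both register
            // counters together.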
if (!isBackFilled) { #if defined(UNIX_AMD64_ABI) if (isStructArg) { // For this case, we've already set the regNums in the argTabEntry intArgRegNum += structIntRegs; fltArgRegNum += structFloatRegs; } else #endif // defined(UNIX_AMD64_ABI) { if (!isNonStandard) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || (argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG))); unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum; assert((unsigned char)numRegsPartial == numRegsPartial); call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial); } #endif // FEATURE_ARG_SPLIT if (passUsingFloatRegs) { fltArgRegNum += size; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else { // Increment intArgRegNum by 'size' registers intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } } } } else // We have an argument that is not passed in a register { // This is a stack argument - put it in the table newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg); #ifdef UNIX_AMD64_ABI // TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs). if (structDesc.passedInRegisters) { newArgEntry->structDesc.CopyFrom(structDesc); } #endif } newArgEntry->nonStandardArgKind = nonStandardArgKind; if (GlobalJitOptions::compFeatureHfa) { if (isHfaArg) { newArgEntry->SetHfaType(hfaType, hfaSlots); } } newArgEntry->SetMultiRegNums(); noway_assert(newArgEntry != nullptr); if (newArgEntry->isStruct) { newArgEntry->passedByRef = passStructByRef; newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType; } else { newArgEntry->argType = argx->TypeGet(); } DEBUG_ARG_SLOTS_ONLY(argSlots += size;) } // end foreach argument loop #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif } //------------------------------------------------------------------------ // fgMorphArgs: Walk and transform (morph) the arguments of a call // // Arguments: // callNode - the call for which we are doing the argument morphing // // Return Value: // Like most morph methods, this method returns the morphed node, // though in this case there are currently no scenarios where the // node itself is re-created. // // Notes: // This calls fgInitArgInfo to create the 'fgArgInfo' for the call. // If it has already been created, that method will simply return. // // This method changes the state of the call node. It uses the existence // of gtCallLateArgs (the late arguments list) to determine if it has // already done the first round of morphing. // // The first time it is called (i.e. during global morphing), this method // computes the "late arguments". 
This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, are doing the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, the argument and any
// earlier arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
    GenTreeCall::Use* args;
    GenTree*          argx;

    GenTreeFlags flagsSummary = GTF_EMPTY;

    unsigned argIndex = 0;

    DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)

    bool reMorphing = call->AreArgsComplete();

    // Set up the fgArgInfo.
    fgInitArgInfo(call);
    JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));

    // If we are remorphing, process the late arguments (which were determined by a previous caller).
    if (reMorphing)
    {
        for (GenTreeCall::Use& use : call->LateArgs())
        {
            use.SetNode(fgMorphTree(use.GetNode()));
            flagsSummary |= use.GetNode()->gtFlags;
        }

        assert(call->fgArgInfo != nullptr);
    }
    call->fgArgInfo->RemorphReset();

    // First we morph the argument subtrees ('this' pointer, arguments, etc.).
    // During the first call to fgMorphArgs we also record the
    // information about late arguments in 'fgArgInfo'.
    // This information is used later to construct the gtCallLateArgs.

    // Process the 'this' argument value, if present.
    if (call->gtCallThisArg != nullptr)
    {
        argx = call->gtCallThisArg->GetNode();
        fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
        argx                        = fgMorphTree(argx);
        call->gtCallThisArg->SetNode(argx);
        // This is a register argument - possibly update it in the table.
        call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
        flagsSummary |= argx->gtFlags;

        if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
        {
            if (!argx->OperIsLocal())
            {
                thisArgEntry->needTmp = true;
                call->fgArgInfo->SetNeedsTemps();
            }
        }
        assert(argIndex == 0);
        argIndex++;
        DEBUG_ARG_SLOTS_ONLY(argSlots++;)
    }

    // Note that this name is a bit of a misnomer - it indicates that there are struct args
    // that occupy more than a single slot that are passed by value (not necessarily in regs).
    bool hasMultiregStructArgs = false;
    for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
    {
        GenTree**      parentArgx = &args->NodeRef();
        fgArgTabEntry* argEntry   = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);

        // Morph the arg node, and update the parent and argEntry pointers.
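        // fgMorphTree may replace the node entirely, so the result is written back
        // through 'parentArgx' to keep the argument use list in sync.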
argx = *parentArgx; argx = fgMorphTree(argx); *parentArgx = argx; assert(argx == args->GetNode()); DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { if (argSlots % 2 == 1) { argSlots++; } } } #endif // DEBUG if (argEntry->isNonStandard() && argEntry->isPassedInRegisters()) { // We need to update the node field for this nonStandard arg here // as it may have been changed by the call to fgMorphTree. call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; continue; } DEBUG_ARG_SLOTS_ASSERT(size != 0); DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();) if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // Get information about this argument. var_types hfaType = argEntry->GetHfaType(); bool isHfaArg = (hfaType != TYP_UNDEF); bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters(); unsigned structSize = 0; // Struct arguments may be morphed into a node that is not a struct type. // In such case the fgArgTabEntry keeps track of whether the original node (before morphing) // was a struct and the struct classification. bool isStructArg = argEntry->isStruct; GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE)) { CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj); unsigned originalSize; if (argObj->TypeGet() == TYP_STRUCT) { if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else { // We have a BADCODE assert for this in fgInitArgInfo. assert(argObj->OperIs(GT_LCL_VAR)); originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize; } } else { originalSize = genTypeSize(argx); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE); var_types structBaseType = argEntry->argType; // First, handle the case where the argument is passed by reference. if (argEntry->passedByRef) { DEBUG_ARG_SLOTS_ASSERT(size == 1); copyBlkClass = objClass; #ifdef UNIX_AMD64_ABI assert(!"Structs are not passed by reference on x64/ux"); #endif // UNIX_AMD64_ABI } else // This is passed by value. { // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE); structSize = originalSize; unsigned passingSize = originalSize; // Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size. // When it can do this is platform-dependent: // - In general, it can be done for power of 2 structs that fit in a single register. // - For ARM and ARM64 it must also be a non-HFA struct, or have a single field. // - This is irrelevant for X86, since structs are always passed by value on the stack. 
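                // For example (an illustrative case, not tied to any particular caller): an
                // 8-byte struct { int a; int b; } passed by value can be re-typed from
                // GT_OBJ(addr) to a GT_IND of TYP_LONG, loading both fields with a single
                // 64-bit load into the argument register.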
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj); bool canTransform = false; if (structBaseType != TYP_STRUCT) { if (isPow2(passingSize)) { canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 // or UNIX_AMD64_ABI cases where they will be passed in registers. else { canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } #endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { #if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. assert(size == 1); copyBlkClass = objClass; #else // UNIX_AMD64_ABI // On Unix, structs are always passed by value. // We only need a copy if we have one of the following: // - The sizes don't match for a non-lclVar argument. // - We have a known struct type (e.g. SIMD) that requires multiple registers. // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not // actually passed in registers. if (argEntry->isPassedInRegisters()) { if (argObj->OperIs(GT_OBJ)) { if (passingSize != structSize) { copyBlkClass = objClass; } } else if (lclVar == nullptr) { // This should only be the case of a value directly producing a known struct type. assert(argObj->TypeGet() != TYP_STRUCT); if (argEntry->numRegs > 1) { copyBlkClass = objClass; } } } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif #ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || ((argObj->OperIs(GT_OBJ)) && (passingSize != structSize))) { copyBlkClass = objClass; } if (structSize < TARGET_POINTER_SIZE) { copyBlkClass = objClass; } #endif // TARGET_ARM } else { // We have a struct argument that fits into a register, and it is either a power of 2, // or a local. // Change our argument, as needed, into a value of the appropriate type. 
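                    // The typical shape here is OBJ(ADDR(LCL_VAR)); below it is rewritten to
                    // IND(ADDR(LCL_VAR)) and then, where possible, folded to a direct use of
                    // the local (possibly as a LCL_FLD when the struct and register types
                    // differ).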
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) && size == (genTypeSize(structBaseType) / REGSIZE_BYTES))); #endif assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize)); if (argObj->OperIs(GT_OBJ)) { argObj->ChangeOper(GT_IND); // Now see if we can fold *(&X) into X if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR) { GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1; // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE); DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(argObj); // GT_IND argObj = temp; *parentArgx = temp; argx = temp; } } if (argObj->gtOper == GT_LCL_VAR) { unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { if (varDsc->lvFieldCnt == 1) { // get the first and only promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize) { // we will use the first and only promoted field argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart); if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize)) { // Just use the existing field's type argObj->gtType = fieldVarDsc->TypeGet(); } else { // Can't use the existing field's type, so use GT_LCL_FLD to swizzle // to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet())); assert(copyBlkClass == NO_CLASS_HANDLE); } else { // use GT_LCL_FLD to swizzle the single field struct to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // The struct fits into a single register, but it has been promoted into its // constituent fields, and so we have to re-assemble it copyBlkClass = objClass; } } else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType)) { // Not a promoted struct, so just swizzle the type by using GT_LCL_FLD lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // Not a GT_LCL_VAR, so we can just change the type on the node argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } #if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in // `genPutStructArgStk` for xarch like we did it for Arm/Arm64. // We still have a struct unless we converted the GT_OBJ into a GT_IND above... if (isHfaArg && passUsingFloatRegs) { } else if (structBaseType == TYP_STRUCT) { // If the valuetype size is not a multiple of TARGET_POINTER_SIZE, // we must copyblk to a temp before doing the obj to avoid // the obj reading memory past the end of the valuetype CLANG_FORMAT_COMMENT_ANCHOR; if (roundupSize > originalSize) { copyBlkClass = objClass; // There are a few special cases where we can omit using a CopyBlk // where we normally would need to use one. 
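                        // (A local source is safe to over-read here: the bytes past the end
                        // of the value are part of its stack allocation, so no CopyBlk to a
                        // padded temp is needed.)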
if (argObj->OperIs(GT_OBJ) && argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar? { copyBlkClass = NO_CLASS_HANDLE; } } } #endif // !UNIX_AMD64_ABI } } if (argEntry->isPassedInRegisters()) { call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); } else { call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing); } if (copyBlkClass != NO_CLASS_HANDLE) { fgMakeOutgoingStructArgCopy(call, args, copyBlkClass); } if (argx->gtOper == GT_MKREFANY) { // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); #ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); #else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument")); lvaSetStruct(tmp, impGetRefAnyClass(), false); // Build the mkrefany as a comma node: // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); destPtrSlot->gtFlags |= GTF_VAR_DEF; destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2); GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot); // Change the expression to "(tmp=val)" args->SetNode(asg); // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS if (isStructArg) { if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) || (isHfaArg && argx->TypeGet() == TYP_STRUCT)) { hasMultiregStructArgs = true; } } #ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); } #endif else { // We must have exactly one register or slot. assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) || ((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1))); } #endif #if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx); if ((lclNode != nullptr) && (lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // Make a GT_FIELD_LIST of the field lclVars. 
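            // For example (hypothetical local numbers, for illustration only): an
            // independently promoted struct { int x; int y; } held in V03 becomes
            //   FIELD_LIST { LCL_VAR<int> V04 @0, LCL_VAR<int> V05 @4 }
            // where V04 and V05 are its promoted field locals.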
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon(); LclVarDsc* varDsc = lvaGetDesc(lcl); GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); for (unsigned fieldLclNum = varDsc->lvFieldLclStart; fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* fieldLcl; if (fieldLclNum == varDsc->lvFieldLclStart) { lcl->SetLclNum(fieldLclNum); lcl->SetOperResetFlags(GT_LCL_VAR); lcl->gtType = fieldVarDsc->TypeGet(); fieldLcl = lcl; } else { fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); } fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); } } } #endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; } // end foreach argument loop if (!reMorphing) { call->fgArgInfo->ArgsComplete(); } /* Process the function address, if indirect call */ if (call->gtCallType == CT_INDIRECT) { call->gtCallAddr = fgMorphTree(call->gtCallAddr); // Const CSE may create an assignment node here flagsSummary |= call->gtCallAddr->gtFlags; } #if FEATURE_FIXED_OUT_ARGS // Record the outgoing argument size. If the call is a fast tail // call, it will setup its arguments in incoming arg area instead // of the out-going arg area, so we don't need to track the // outgoing arg size. if (!call->IsFastTailCall()) { #if defined(UNIX_AMD64_ABI) // This is currently required for the UNIX ABI to work correctly. opts.compNeedToAlignFrame = true; #endif // UNIX_AMD64_ABI const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) unsigned preallocatedArgCount = 0; if (!compMacOsArm64Abi()) { preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); #ifdef DEBUG if (verbose) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " "outgoingArgSpaceSize=%d\n", argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } else { printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); #endif } #endif } #endif // FEATURE_FIXED_OUT_ARGS // Clear the ASG and EXCEPT (if possible) flags on the call node call->gtFlags &= ~GTF_ASG; if (!call->OperMayThrow(this)) { call->gtFlags &= ~GTF_EXCEPT; } // Union in the side effect flags from the call's operands call->gtFlags |= flagsSummary & GTF_ALL_EFFECT; // If we are remorphing or don't have any register arguments or other arguments that need // temps, then we don't need to call SortArgs() and EvalArgsToTemps(). // if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps())) { // Do the 'defer or eval to temp' analysis. 
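        // Arguments with interfering side effects (e.g. an assignment whose target is read
        // by a later argument) are evaluated into temps here, so that the final placement
        // into registers and stack slots cannot reorder those effects.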
call->fgArgInfo->SortArgs(); call->fgArgInfo->EvalArgsToTemps(); } if (hasMultiregStructArgs) { fgMorphMultiregStructArgs(call); } #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif return call; } #ifdef _PREFAST_ #pragma warning(pop) #endif //----------------------------------------------------------------------------- // fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and // call fgMorphMultiregStructArg on each of them. // // Arguments: // call : a GenTreeCall node that has one or more TYP_STRUCT arguments\. // // Notes: // We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types. // It will ensure that the struct arguments are in the correct form. // If this method fails to find any TYP_STRUCT arguments it will assert. // void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) { bool foundStructArg = false; GenTreeFlags flagsSummary = GTF_EMPTY; #ifdef TARGET_X86 assert(!"Logic error: no MultiregStructArgs for X86"); #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI"); #endif for (GenTreeCall::Use& use : call->Args()) { // For late arguments the arg tree that is overridden is in the gtCallLateArgs list. // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.) // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself, // otherwise points to the list in the late args list. bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0; fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode()); assert(fgEntryPtr != nullptr); GenTree* argx = fgEntryPtr->GetNode(); GenTreeCall::Use* lateUse = nullptr; GenTree* lateNode = nullptr; if (isLateArg) { for (GenTreeCall::Use& lateArgUse : call->LateArgs()) { GenTree* argNode = lateArgUse.GetNode(); if (argx == argNode) { lateUse = &lateArgUse; lateNode = argNode; break; } } assert((lateUse != nullptr) && (lateNode != nullptr)); } if (!fgEntryPtr->isStruct) { continue; } unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber()); if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT)) { foundStructArg = true; if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST)) { if (fgEntryPtr->IsHfaRegArg()) { var_types hfaType = fgEntryPtr->GetHfaType(); unsigned structSize; if (argx->OperIs(GT_OBJ)) { structSize = argx->AsObj()->GetLayout()->GetSize(); } else if (varTypeIsSIMD(argx)) { structSize = genTypeSize(argx); } else { assert(argx->OperIs(GT_LCL_VAR)); structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize; } assert(structSize > 0); if (structSize == genTypeSize(hfaType)) { if (argx->OperIs(GT_OBJ)) { argx->SetOper(GT_IND); } argx->gtType = hfaType; } } GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr); // Did we replace 'argx' with a new tree? 
if (newArgx != argx) { // link the new arg node into either the late arg list or the gtCallArgs list if (isLateArg) { lateUse->SetNode(newArgx); } else { use.SetNode(newArgx); } assert(fgEntryPtr->GetNode() == newArgx); } } } } // We should only call this method when we actually have one or more multireg struct args assert(foundStructArg); // Update the flags call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT); } //----------------------------------------------------------------------------- // fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list, // morph the argument as needed to be passed correctly. // // Arguments: // arg - A GenTree node containing a TYP_STRUCT arg // fgEntryPtr - the fgArgTabEntry information for the current 'arg' // // Notes: // The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT. // If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the // stack are marked as doNotEnregister, and then we return. // // If it is passed by register, we mutate the argument into the GT_FIELD_LIST form // which is only used for struct arguments. // // If arg is a LclVar we check if it is struct promoted and has the right number of fields // and if they are at the appropriate offsets we will use the struct promted fields // in the GT_FIELD_LIST nodes that we create. // If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements // we will use a set of GT_LCL_FLDs nodes to access the various portions of the struct // this also forces the struct to be stack allocated into the local frame. // For the GT_OBJ case will clone the address expression and generate two (or more) // indirections. // Currently the implementation handles ARM64/ARM and will NYI for other architectures. // GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr) { assert(varTypeIsStruct(arg->TypeGet())); #if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI) NYI("fgMorphMultiregStructArg requires implementation for this target"); #endif #ifdef TARGET_ARM if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) || (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK)) #else if (fgEntryPtr->GetRegNum() == REG_STK) #endif { GenTreeLclVarCommon* lcl = nullptr; GenTree* actualArg = arg->gtEffectiveVal(); if (actualArg->OperGet() == GT_OBJ) { if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR)) { lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon(); } } else if (actualArg->OperGet() == GT_LCL_VAR) { lcl = actualArg->AsLclVarCommon(); } if (lcl != nullptr) { if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT) { arg = fgMorphLclArgToFieldlist(lcl); } else if (arg->TypeGet() == TYP_STRUCT) { // If this is a non-register struct, it must be referenced from memory. if (!actualArg->OperIs(GT_OBJ)) { // Create an Obj of the temp to use it as a call argument. arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg); arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg); } // Its fields will need to be accessed by address. 
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg)); } } return arg; } #if FEATURE_MULTIREG_ARGS // Examine 'arg' and setup argValue objClass and structSize // const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg); GenTree* argValue = arg; // normally argValue will be arg, but see right below unsigned structSize = 0; if (arg->TypeGet() != TYP_STRUCT) { structSize = genTypeSize(arg->TypeGet()); assert(structSize == info.compCompHnd->getClassSize(objClass)); } else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); const ClassLayout* objLayout = argObj->GetLayout(); structSize = objLayout->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. GenTree* op1 = argObj->gtOp1; if (op1->OperGet() == GT_ADDR) { GenTree* underlyingTree = op1->AsOp()->gtOp1; // Only update to the same type. if (underlyingTree->OperIs(GT_LCL_VAR)) { const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar()); if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout)) { argValue = underlyingTree; } } } } else if (arg->OperGet() == GT_LCL_VAR) { LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon()); structSize = varDsc->lvExactSize; assert(structSize == info.compCompHnd->getClassSize(objClass)); } else { structSize = info.compCompHnd->getClassSize(objClass); } var_types hfaType = TYP_UNDEF; var_types elemType = TYP_UNDEF; unsigned elemCount = 0; unsigned elemSize = 0; var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0 hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters()) { elemType = hfaType; elemSize = genTypeSize(elemType); elemCount = structSize / elemSize; assert(elemSize * elemCount == structSize); for (unsigned inx = 0; inx < elemCount; inx++) { type[inx] = elemType; } } else { assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE); BYTE gcPtrs[MAX_ARG_REG_COUNT]; elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); for (unsigned inx = 0; inx < elemCount; inx++) { #ifdef UNIX_AMD64_ABI if (gcPtrs[inx] == TYPE_GC_NONE) { type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx], fgEntryPtr->structDesc.eightByteSizes[inx]); } else #endif // UNIX_AMD64_ABI { type[inx] = getJitGCType(gcPtrs[inx]); } } #ifndef UNIX_AMD64_ABI if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { elemSize = TARGET_POINTER_SIZE; // We can safely widen this to aligned bytes since we are loading from // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and // lives in the stack frame or will be a promoted field. 
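            // For example (illustrative sizes only): a 12-byte struct local is widened here
            // to elemCount == 2 pointer-sized slots (16 bytes on a 64-bit target); the extra
            // 4 bytes read are stack-frame padding, so the wider load is safe.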
// structSize = elemCount * TARGET_POINTER_SIZE; } else // we must have a GT_OBJ { assert(argValue->OperGet() == GT_OBJ); // We need to load the struct from an arbitrary address // and we can't read past the end of the structSize // We adjust the last load type here // unsigned remainingBytes = structSize % TARGET_POINTER_SIZE; unsigned lastElem = elemCount - 1; if (remainingBytes != 0) { switch (remainingBytes) { case 1: type[lastElem] = TYP_BYTE; break; case 2: type[lastElem] = TYP_SHORT; break; #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) case 4: type[lastElem] = TYP_INT; break; #endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) default: noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg"); break; } } } #endif // !UNIX_AMD64_ABI } // We should still have a TYP_STRUCT assert(varTypeIsStruct(argValue->TypeGet())); GenTreeFieldList* newArg = nullptr; // Are we passing a struct LclVar? // if (argValue->OperGet() == GT_LCL_VAR) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); // At this point any TYP_STRUCT LclVar must be an aligned struct // or an HFA struct, both which are passed by value. // assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa()); varDsc->lvIsMultiRegArg = true; #ifdef DEBUG if (verbose) { JITDUMP("Multireg struct argument V%02u : ", varNum); fgEntryPtr->Dump(); } #endif // DEBUG #ifndef UNIX_AMD64_ABI // This local variable must match the layout of the 'objClass' type exactly if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { // We have a HFA struct. noway_assert(elemType == varDsc->GetHfaType()); noway_assert(elemSize == genTypeSize(elemType)); noway_assert(elemCount == (varDsc->lvExactSize / elemSize)); noway_assert(elemSize * elemCount == varDsc->lvExactSize); for (unsigned inx = 0; (inx < elemCount); inx++) { noway_assert(type[inx] == elemType); } } else { #if defined(TARGET_ARM64) // We must have a 16-byte struct (non-HFA) noway_assert(elemCount == 2); #elif defined(TARGET_ARM) noway_assert(elemCount <= 4); #endif for (unsigned inx = 0; inx < elemCount; inx++) { var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx); // We setup the type[inx] value above using the GC info from 'objClass' // This GT_LCL_VAR must have the same GC layout info // if (varTypeIsGC(currentGcLayoutType)) { noway_assert(type[inx] == currentGcLayoutType); } else { // We may have use a small type when we setup the type[inx] values above // We can safely widen this to TYP_I_IMPL type[inx] = TYP_I_IMPL; } } } if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { bool canMorphToFieldList = true; for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize) { const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset); if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum))) { canMorphToFieldList = false; break; } } if (canMorphToFieldList) { newArg = fgMorphLclArgToFieldlist(varNode); } } else #endif // !UNIX_AMD64_ABI #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // Is this LclVar a promoted struct with exactly 2 fields? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa()) { // See if we have two promoted fields that start at offset 0 and 8? unsigned loVarNum = lvaGetFieldLocal(varDsc, 0); unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE); // Did we find the promoted fields at the necessary offsets? 
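        // For example (hypothetical local numbers, for illustration only): a promoted
        // struct { long a; long b; } with field locals V05 @0 and V06 @8 can be passed as
        //   FIELD_LIST { LCL_VAR<long> V05 @0, LCL_VAR<long> V06 @8 }
        // without forcing the struct to memory.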
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM)) { LclVarDsc* loVarDsc = lvaGetDesc(loVarNum); LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum); var_types loType = loVarDsc->lvType; var_types hiType = hiVarDsc->lvType; if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) || (varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1)))) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // } else { // We can use the struct promoted field as the two arguments // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr)) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType); newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #elif defined(TARGET_ARM) // Is this LclVar a promoted struct with exactly same size? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { // See if we have promoted fields? unsigned varNums[4]; bool hasBadVarNum = false; for (unsigned inx = 0; inx < elemCount; inx++) { varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx); if (varNums[inx] == BAD_VAR_NUM) { hasBadVarNum = true; break; } } // Did we find the promoted fields at the necessary offsets? if (!hasBadVarNum) { LclVarDsc* varDscs[4]; var_types varType[4]; bool varIsFloat = false; for (unsigned inx = 0; inx < elemCount; inx++) { varDscs[inx] = lvaGetDesc(varNums[inx]); varType[inx] = varDscs[inx]->lvType; if (varTypeIsFloating(varType[inx])) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the // integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // varIsFloat = true; break; } } if (!varIsFloat) { newArg = fgMorphLclArgToFieldlist(varNode); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #endif // TARGET_ARM } // If we didn't set newarg to a new List Node tree // if (newArg == nullptr) { if (fgEntryPtr->GetRegNum() == REG_STK) { // We leave this stack passed argument alone return arg; } // Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted ) // A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it? 
// if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); unsigned baseOffset = varNode->GetLclOffs(); unsigned lastOffset = baseOffset + structSize; // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we setup using 'objClass' if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } else // this varDsc contains no GC pointers { for (unsigned inx = 0; inx < elemCount; inx++) { // The GC information must match what we setup using 'objClass' noway_assert(!varTypeIsGC(type[inx])); } } // // We create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI) // unsigned offset = baseOffset; newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset); newArg->AddField(this, nextLclFld, offset, type[inx]); offset += genTypeSize(type[inx]); } } // Are we passing a GT_OBJ struct? // else if (argValue->OperGet() == GT_OBJ) { GenTreeObj* argObj = argValue->AsObj(); GenTree* baseAddr = argObj->gtOp1; var_types addrType = baseAddr->TypeGet(); if (baseAddr->OperGet() == GT_ADDR) { GenTree* addrTaken = baseAddr->AsOp()->gtOp1; if (addrTaken->IsLocal()) { GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); // We access non-struct type (for example, long) as a struct type. // Make sure lclVar lives on stack to make sure its fields are accessible by address. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } } // Create a new tree for 'arg' // replace the existing LDOBJ(EXPR) // with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); unsigned offset = 0; for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* curAddr = baseAddr; if (offset != 0) { GenTree* baseAddrDup = gtCloneExpr(baseAddr); noway_assert(baseAddrDup != nullptr); curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL)); } else { curAddr = baseAddr; } GenTree* curItem = gtNewIndir(type[inx], curAddr); // For safety all GT_IND should have at least GT_GLOB_REF set. 
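                // (The base address may point anywhere, e.g. into another object, so these
                // indirections must be treated as global references for reordering purposes.)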
curItem->gtFlags |= GTF_GLOB_REF; newArg->AddField(this, curItem, offset, type[inx]); offset += genTypeSize(type[inx]); } } } #ifdef DEBUG // If we reach here we should have set newArg to something if (newArg == nullptr) { gtDispTree(argValue); assert(!"Missing case in fgMorphMultiregStructArg"); } #endif noway_assert(newArg != nullptr); #ifdef DEBUG if (verbose) { printf("fgMorphMultiregStructArg created tree:\n"); gtDispTree(newArg); } #endif arg = newArg; // consider calling fgMorphTree(newArg); #endif // FEATURE_MULTIREG_ARGS return arg; } //------------------------------------------------------------------------ // fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields // // Arguments: // lcl - The GT_LCL_VAR node we will transform // // Return value: // The new GT_FIELD_LIST that we have created. // GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl) { LclVarDsc* varDsc = lvaGetDesc(lcl); assert(varDsc->lvPromoted); unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclNum = varDsc->lvFieldLclStart; GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned i = 0; i < fieldCount; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); fieldLclNum++; } return fieldList; } //------------------------------------------------------------------------ // fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary, // to pass to a callee. // // Arguments: // call - call being processed // args - args for the call // copyBlkClass - class handle for the struct // // The arg is updated if necessary with the copy. // void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass) { GenTree* argx = args->GetNode(); noway_assert(argx->gtOper != GT_MKREFANY); fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx); // If we're optimizing, see if we can avoid making a copy. // // We don't need a copy if this is the last use of an implicit by-ref local. // if (opts.OptimizationEnabled()) { GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { const unsigned varNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(varNum); const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY); // We don't have liveness so we rely on other indications of last use. // // We handle these cases: // // * (must not copy) If the call is a tail call, the use is a last use. // We must skip the copy if we have a fast tail call. // // * (may not copy) if the call is noreturn, the use is a last use. // We also check for just one reference here as we are not doing // alias analysis of the call's parameters, or checking if the call // site is not within some try region. // // * (may not copy) if there is exactly one use of the local in the method, // and the call is not in loop, this is a last use. // // fgMightHaveLoop() is expensive; check it last, only if necessary. 
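        // For example (an illustrative case): a struct parameter that is implicitly passed
        // by reference and forwarded exactly once to a noreturn callee can be handed through
        // directly - the caller can never observe a mutation made after that call.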
// if (call->IsTailCall() || // ((totalAppearances == 1) && call->IsNoReturn()) || // ((totalAppearances == 1) && !fgMightHaveLoop())) { args->SetNode(lcl); assert(argEntry->GetNode() == lcl); JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum); return; } } } JITDUMP("making an outgoing copy for struct arg\n"); if (fgOutgoingArgTemps == nullptr) { fgOutgoingArgTemps = hashBv::Create(this); } unsigned tmp = 0; bool found = false; // Attempt to find a local we have already used for an outgoing struct and reuse it. // We do not reuse within a statement. if (!opts.MinOpts()) { indexType lclNum; FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) && !fgCurrentlyInUseArgTemps->testBit(lclNum)) { tmp = (unsigned)lclNum; found = true; JITDUMP("reusing outgoing struct arg"); break; } } NEXT_HBV_BIT_SET; } // Create the CopyBlk tree and insert it. if (!found) { // Get a new temp // Here We don't need unsafe value cls check, since the addr of this temp is used only in copyblk. tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument")); lvaSetStruct(tmp, copyBlkClass, false); if (call->IsVarargs()) { lvaSetStructUsedAsVarArg(tmp); } fgOutgoingArgTemps->setBit(tmp); } fgCurrentlyInUseArgTemps->setBit(tmp); // TYP_SIMD structs should not be enregistered, since ABI requires it to be // allocated on stack and address of it needs to be passed. if (lclVarIsSIMDType(tmp)) { // TODO: check if we need this block here or other parts already deal with it. lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg)); } // Create a reference to the temp GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType); dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction. // Copy the valuetype to the temp GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */); copyBlk = fgMorphCopyBlock(copyBlk); #if FEATURE_FIXED_OUT_ARGS // Do the copy early, and evalute the temp later (see EvalArgsToTemps) // When on Unix create LCL_FLD for structs passed in more than one registers. See fgMakeTmpArgNode GenTree* arg = copyBlk; #else // FEATURE_FIXED_OUT_ARGS // Structs are always on the stack, and thus never need temps // so we have to put the copy and temp all into one expression. argEntry->tmpNum = tmp; GenTree* arg = fgMakeTmpArgNode(argEntry); // Change the expression to "(tmp=val),tmp" arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg); #endif // FEATURE_FIXED_OUT_ARGS args->SetNode(arg); call->fgArgInfo->EvalToTmp(argEntry, tmp, arg); } #ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask) { assert(varDsc->lvPromoted); // There's no way to do these calculations without breaking abstraction and assuming that // integer register arguments are consecutive ints. They are on ARM. // To start, figure out what register contains the last byte of the first argument. LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; ; // Now we're keeping track of the register that the last field ended in; see what registers // subsequent fields start in, and whether any are skipped. 
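    // For example (an illustrative layout): in struct { int a; double b; } passed starting
    // at r0, 'a' ends in r0 but 'b' starts at offset 8 (register slot 2) because of its
    // 8-byte alignment, so r1 is skipped and must be added to the skip mask.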
// (We assume here the invariant that the fields are sorted in offset order.) for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++) { unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset; LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum); unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE; assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields. // This loop should enumerate the offsets of any registers skipped. // Find what reg contains the last byte: // And start at the first register after that. If that isn't the first reg of the current for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++) { // If the register number would not be an arg reg, we're done. if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return; *pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets)); } lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; } } #endif // TARGET_ARM /***************************************************************************** * * A little helper used to rearrange nested commutative operations. The * effect is that nested associative, commutative operations are transformed * into a 'left-deep' tree, i.e. into something like this: * * (((a op b) op c) op d) op... */ #if REARRANGE_ADDS void Compiler::fgMoveOpsLeft(GenTree* tree) { GenTree* op1; GenTree* op2; genTreeOps oper; do { op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; oper = tree->OperGet(); noway_assert(GenTree::OperIsCommutative(oper)); noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL); noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder); noway_assert(oper == op2->gtOper); // Commutativity doesn't hold if overflow checks are needed if (tree->gtOverflowEx() || op2->gtOverflowEx()) { return; } if (gtIsActiveCSE_Candidate(op2)) { // If we have marked op2 as a CSE candidate, // we can't perform a commutative reordering // because any value numbers that we computed for op2 // will be incorrect after performing a commutative reordering // return; } if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT)) { return; } // Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)) { return; } if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN) { // We could deal with this, but we were always broken and just hit the assert // below regarding flags, which means it's not frequent, so will just bail out. // See #195514 return; } noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx()); GenTree* ad1 = op2->AsOp()->gtOp1; GenTree* ad2 = op2->AsOp()->gtOp2; // Compiler::optOptimizeBools() can create GT_OR of two GC pointers yeilding a GT_INT // We can not reorder such GT_OR trees // if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet())) { break; } // Don't split up a byref calculation and create a new byref. E.g., // [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int). // Doing this transformation could create a situation where the first // addition (that is, [byref]+ (ref, int) ) creates a byref pointer that // no longer points within the ref object. If a GC happens, the byref won't // get updated. This can happen, for instance, if one of the int components // is negative. 
        // It also requires the address generation be in a fully-interruptible code region.
        //
        if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL)
        {
            assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD));
            break;
        }

        /* Change "(x op (y op z))" to "(x op y) op z" */
        /* ie.    "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */

        GenTree* new_op1       = op2;
        new_op1->AsOp()->gtOp1 = op1;
        new_op1->AsOp()->gtOp2 = ad1;

        /* Change the flags. */

        // Make sure we aren't throwing away any flags.
        noway_assert((new_op1->gtFlags &
                      ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
                        GTF_REVERSE_OPS |             // The reverse ops flag also can be set; it will be re-calculated.
                        GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0);

        new_op1->gtFlags =
            (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate the GTF_DONT_CSE flag.
            (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT);

        /* Retype new_op1 if it has become (or stopped being) a GC ptr. */

        if (varTypeIsGC(op1->TypeGet()))
        {
            noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
                          oper == GT_ADD) || // byref(ref + (int+int))
                         (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
                          oper == GT_OR)); // int(gcref | int(gcref|intval))

            new_op1->gtType = tree->gtType;
        }
        else if (varTypeIsGC(ad2->TypeGet()))
        {
            // Neither ad1 nor op1 is GC, so new_op1 isn't either.
            noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL);
            new_op1->gtType = TYP_I_IMPL;
        }

        // If new_op1 is a new expression, assign it a new unique value number.
        // (vnStore is null before the ValueNumber phase has run.)
        if (vnStore != nullptr)
        {
            // We can only keep the old value number on new_op1 if both op1 and ad2
            // have the same non-NoVN value numbers. Since op is commutative, comparing
            // only ad2 and op1 is enough.
            if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
                (ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
                (ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal()))
            {
                new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet()));
            }
        }

        tree->AsOp()->gtOp1 = new_op1;
        tree->AsOp()->gtOp2 = ad2;

        /* If 'new_op1' is now the same nested op, process it recursively */

        if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
        {
            fgMoveOpsLeft(new_op1);
        }

        /* If 'ad2' is now the same nested op, process it.
         * Instead of recursion, we set up op1 and op2 for the next loop.
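         *
         * (Illustrative sketch, not from a real JIT dump: for a right-nested tree
         * ADD(a, ADD(b, ADD(c, d))), the rotation above yields
         * ADD(ADD(a, b), ADD(c, d)), and the next iteration of this loop rotates
         * the remaining nested op, giving the left-deep ADD(ADD(ADD(a, b), c), d).)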
         */

        op1 = new_op1;
        op2 = ad2;
    } while ((op2->gtOper == oper) && !op2->gtOverflowEx());

    return;
}

#endif

/*****************************************************************************/

void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay)
{
    if (tree->OperIs(GT_BOUNDS_CHECK))
    {
        GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk();
        BasicBlock* const       failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay);
        if (failBlock != nullptr)
        {
            boundsChk->gtIndRngFailBB = failBlock;
        }
    }
    else if (tree->OperIs(GT_INDEX_ADDR))
    {
        GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr();
        BasicBlock* const       failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
        if (failBlock != nullptr)
        {
            indexAddr->gtIndRngFailBB = failBlock;
        }
    }
    else
    {
        noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX));
        fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
    }
}

BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay)
{
    if (opts.MinOpts())
    {
        delay = false;
    }

    if (!opts.compDbgCode)
    {
        if (!delay && !compIsForInlining())
        {
            // Create/find the appropriate "range-fail" label.
            return fgRngChkTarget(compCurBB, kind);
        }
    }

    return nullptr;
}

/*****************************************************************************
 *
 *  Expand a GT_INDEX node and fully morph the child operands.
 *
 *  The original GT_INDEX node is bashed into the GT_IND node that accesses
 *  the array element. We expand the GT_INDEX node into a larger tree that
 *  evaluates the array base and index. The simplest expansion is a GT_COMMA
 *  with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag.
 *  For complex array or index expressions, one or more GT_COMMA assignments
 *  are inserted so that we only evaluate the array or index expressions once.
 *
 *  The fully expanded tree is then morphed. This causes gtFoldExpr to
 *  perform local constant prop, and to reorder and fold the constants in
 *  the tree.
 *
 *  We then parse the resulting array element expression in order to locate
 *  and label the constants and variables that occur in the tree.
 */

const int MAX_ARR_COMPLEXITY   = 4;
const int MAX_INDEX_COMPLEXITY = 4;

GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
{
    noway_assert(tree->gtOper == GT_INDEX);
    GenTreeIndex* asIndex = tree->AsIndex();

    var_types            elemTyp        = asIndex->TypeGet();
    unsigned             elemSize       = asIndex->gtIndElemSize;
    CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass;

    noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr);

    // Fold "cns_str"[cns_index] to a ushort constant.
    // NOTE: don't do it for an empty string; the operation will fail anyway.
    if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) &&
        !asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32())
    {
        const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue());
        if (cnsIndex >= 0)
        {
            int             length;
            const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd,
                                                                     asIndex->Arr()->AsStrCon()->gtSconCPX, &length);
            if ((cnsIndex < length) && (str != nullptr))
            {
                GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT);
                INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
                return cnsCharNode;
            }
        }
    }

#ifdef FEATURE_SIMD
    if (featureSIMD && varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize))
    {
        // If this is a SIMD type, this is the point at which we lose the type information,
        // so we need to set the correct type on the GT_IND.
        // (We don't care about the base type here, so we only check, but don't retain, the return value.)
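        // (Descriptive note: getBaseJitTypeAndSizeOfSIMDType is expected to return
        // CORINFO_TYPE_UNDEF for struct handles it does not recognize as SIMD types,
        // in which case the check below leaves elemTyp and elemStructType untouched.)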
unsigned simdElemSize = 0; if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF) { assert(simdElemSize == elemSize); elemTyp = getSIMDTypeForSize(elemSize); // This is the new type of the node. tree->gtType = elemTyp; // Now set elemStructType to null so that we don't confuse value numbering. elemStructType = nullptr; } } #endif // FEATURE_SIMD // Set up the array length's offset into lenOffs // And the first element's offset into elemOffs ssize_t lenOffs; ssize_t elemOffs; if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { lenOffs = OFFSETOF__CORINFO_String__stringLen; elemOffs = OFFSETOF__CORINFO_String__chars; tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE } else { // We have a standard array lenOffs = OFFSETOF__CORINFO_Array__length; elemOffs = OFFSETOF__CORINFO_Array__data; } // In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts // compilation time is roughly proportional to the size of the IR, this helps keep compilation times down. // Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion // performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in // minopts). // // When we *are* optimizing, we fully expand GT_INDEX to: // 1. Evaluate the array address expression and store the result in a temp if the expression is complex or // side-effecting. // 2. Evaluate the array index expression and store the result in a temp if the expression is complex or // side-effecting. // 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array)) // 4. Compute the address of the element that will be accessed: // GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) // 5. Dereference the address with a GT_IND. // // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows // for more straightforward bounds-check removal, CSE, etc. if (opts.MinOpts()) { GenTree* const array = fgMorphTree(asIndex->Arr()); GenTree* const index = fgMorphTree(asIndex->Index()); GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs)); indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT; // Mark the indirection node as needing a range check if necessary. 
// Note this will always be true unless JitSkipArrayBoundCheck() is used if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0) { fgSetRngChkTarget(indexAddr); } if (!tree->TypeIs(TYP_STRUCT)) { tree->ChangeOper(GT_IND); } else { DEBUG_DESTROY_NODE(tree); tree = gtNewObjNode(elemStructType, indexAddr); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); } GenTreeIndir* const indir = tree->AsIndir(); indir->Addr() = indexAddr; bool canCSE = indir->CanCSE(); indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT); if (!canCSE) { indir->SetDoNotCSE(); } INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return indir; } GenTree* arrRef = asIndex->Arr(); GenTree* index = asIndex->Index(); bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0); GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression GenTree* bndsChk = nullptr; // If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address. if (chkd) { GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression GenTree* index2 = nullptr; // If the arrRef or index expressions involves an assignment, a call or reads from global memory, // then we *must* allocate a temporary in which to "localize" those values, to ensure that the // same values are used in the bounds check and the actual dereference. // Also we allocate the temporary when the expresion is sufficiently complex/expensive. // // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is // not exposed. Without that condition there are cases of local struct fields that were previously, // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that // were mostly ameliorated by adding this condition. // // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created // after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to // do this here. if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr")); arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef); arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); } else { arrRef2 = gtCloneExpr(arrRef); noway_assert(arrRef2 != nullptr); } if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) || index->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr")); indexDefn = gtNewTempAssign(indexTmpNum, index); index = gtNewLclvNode(indexTmpNum, index->TypeGet()); index2 = gtNewLclvNode(indexTmpNum, index->TypeGet()); } else { index2 = gtCloneExpr(index); noway_assert(index2 != nullptr); } // Next introduce a GT_BOUNDS_CHECK node var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check. 
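        // (Sketch of the overall shape assembled by the end of this function, per the header
        //  comment:
        //     COMMA(arrRefDefn?, COMMA(indexDefn?, COMMA(BOUNDS_CHECK(index, ARR_LENGTH(arrRef)), IND(addr))))
        //  where the optional defns are the temp assignments created just above.)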
#ifdef TARGET_64BIT
        // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
        // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
        // the comparison will have to be widened to 64 bits.
        if (index->TypeGet() == TYP_I_IMPL)
        {
            bndsChkType = TYP_I_IMPL;
        }
#endif // TARGET_64BIT

        GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);

        if (bndsChkType != TYP_INT)
        {
            arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
        }

        GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);

        bndsChk = arrBndsChk;

        // Now we'll switch to using the second copies for arrRef and index
        // to compute the address expression.
        arrRef = arrRef2;
        index  = index2;
    }

    // Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
    GenTree* addr;

#ifdef TARGET_64BIT
    // Widen 'index' on 64-bit targets
    if (index->TypeGet() != TYP_I_IMPL)
    {
        if (index->OperGet() == GT_CNS_INT)
        {
            index->gtType = TYP_I_IMPL;
        }
        else
        {
            index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
        }
    }
#endif // TARGET_64BIT

    /* Scale the index value if necessary */
    if (elemSize > 1)
    {
        GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);

        // Fix 392756 WP7 Crossgen
        //
        // During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
        // is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
        // Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
        //
        size->gtFlags |= GTF_DONT_CSE;

        /* Multiply by the array element size */
        addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
    }
    else
    {
        addr = index;
    }

    // Be careful to only create the byref pointer when the full index expression is added to the array reference.
    // We don't want to create a partial byref address expression that doesn't include the full index offset:
    // a byref must point within the containing object. It is dangerous (especially when optimizations come into
    // play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
    // the partial byref will not point within the object, and thus not get updated correctly during a GC.
    // This is mostly a risk in fully-interruptible code regions.
    //
    // We can generate two types of trees for "addr":
    //
    //  1) "arrRef + (index + elemOffset)"
    //  2) "(arrRef + elemOffset) + index"
    //
    // XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
    // while for Arm it's better to try to make an invariant sub-tree as large as possible, which is usually
    // "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
    // 2) should still be safe from GC's point of view since both ADD operations are byref and point to
    // within the object so GC will be able to correctly track and update them.

    bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
    groupArrayRefWithElemOffset = true;
    // TODO: in some cases even on ARM we'd better use the 1) shape, because if "index" is invariant and "arrRef" is
    // not, we at least will be able to hoist/CSE "index + elemOffset" in some cases.
    // See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497
    // Use the 2) form only for primitive types for now - it significantly reduced the number of size regressions.
    if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
    {
        groupArrayRefWithElemOffset = false;
    }
#endif

    // First element's offset
    GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL);
    if (groupArrayRefWithElemOffset)
    {
        GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset);
        addr                    = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr);
    }
    else
    {
        addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset);
        addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
    }

    assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) ||
           (GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL));

    // Change the original GT_INDEX node into a GT_IND node.
    tree->SetOper(GT_IND);

    // If the index node is a floating-point type, notify the compiler
    // we'll potentially use floating point registers at the time of codegen.
    if (varTypeUsesFloatReg(tree->gtType))
    {
        this->compFloatingPointUsed = true;
    }

    // We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node
    // is no longer a GT_INDEX node.
    tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT);

    tree->AsOp()->gtOp1 = addr;

    // This is an array index expression.
    tree->gtFlags |= GTF_IND_ARR_INDEX;

    // If there's a bounds check, the indir won't fault.
    if (bndsChk || indexNonFaulting)
    {
        tree->gtFlags |= GTF_IND_NONFAULTING;
    }
    else
    {
        tree->gtFlags |= GTF_EXCEPT;
    }

    if (nCSE)
    {
        tree->gtFlags |= GTF_DONT_CSE;
    }

    // Store information about it.
    GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType));

    // Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it.
    GenTree* indTree = tree;

    // Did we create a bndsChk tree?
    if (bndsChk)
    {
        // Use a GT_COMMA node to prepend the array bound check.
        //
        tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree);

        /* Mark the indirection node as needing a range check */
        fgSetRngChkTarget(bndsChk);
    }

    if (indexDefn != nullptr)
    {
        // Use a GT_COMMA node to prepend the index assignment.
        //
        tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree);
    }
    if (arrRefDefn != nullptr)
    {
        // Use a GT_COMMA node to prepend the arrRef assignment.
        //
        tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
    }

    JITDUMP("fgMorphArrayIndex (before remorph):\n")
    DISPTREE(tree)

    // Currently we morph the tree to perform some folding operations prior
    // to attaching fieldSeq info and labeling constant array index contributions.
    //
    tree = fgMorphTree(tree);

    JITDUMP("fgMorphArrayIndex (after remorph):\n")
    DISPTREE(tree)

    // Ideally we just want to proceed to attaching fieldSeq info and labeling the
    // constant array index contributions, but the morphing operation may have changed
    // the 'tree' into something that now unconditionally throws an exception.
    //
    // In such a case the gtEffectiveVal could be a new tree, or its gtOper could have been
    // modified, or it could be left unchanged. If it is unchanged then we should not return;
    // instead we should proceed to attaching the fieldSeq info, etc...
    //
    GenTree* arrElem = tree->gtEffectiveVal();

    if (fgIsCommaThrow(tree))
    {
        if ((arrElem != indTree) ||     // A new tree node may have been created
            (!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT
        {
            return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
} } assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED)); DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED) addr = arrElem->gtGetOp1(); GenTree* cnsOff = nullptr; if (addr->OperIs(GT_ADD)) { GenTree* addrOp1 = addr->gtGetOp1(); if (groupArrayRefWithElemOffset) { if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI()) { assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF)); cnsOff = addrOp1->gtGetOp2(); addr = addr->gtGetOp2(); // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } else { assert(addr->gtGetOp2()->IsCnsIntOrI()); cnsOff = addr->gtGetOp2(); addr = nullptr; } } else { assert(addr->TypeIs(TYP_BYREF)); assert(addr->gtGetOp1()->TypeIs(TYP_REF)); addr = addr->gtGetOp2(); // Look for the constant [#FirstElem] node here, or as the RHS of an ADD. if (addr->IsCnsIntOrI()) { cnsOff = addr; addr = nullptr; } else { if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI()) { cnsOff = addr->gtGetOp2(); addr = addr->gtGetOp1(); } // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } } } else if (addr->IsCnsIntOrI()) { cnsOff = addr; } FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField); if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs)) { // Assign it the [#FirstElem] field sequence // cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq; } else // We have folded the first element's offset with the index expression { // Build the [#ConstantIndex, #FirstElem] field sequence // FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq); if (cnsOff == nullptr) // It must have folded into a zero offset { // Record in the general zero-offset map. fgAddFieldSeqForZeroOffset(addr, fieldSeq); } else { cnsOff->AsIntCon()->gtFieldSeq = fieldSeq; } } return tree; } #ifdef TARGET_X86 /***************************************************************************** * * Wrap fixed stack arguments for varargs functions to go through varargs * cookie to access them, except for the cookie itself. * * Non-x86 platforms are allowed to access all arguments directly * so we don't need this code. 
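 *
 *  (Illustrative sketch of the access built below for a fixed stack arg of type T:
 *       IND(T, SUB(LCL_VAR(lvaVarargsBaseOfStkArgs), CNS(offset)))
 *   - or an OBJ for struct types - where the offset is derived from the arg's
 *   stack offset as computed in the code below.)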
* */ GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs) { /* For the fixed stack arguments of a varargs function, we need to go through the varargs cookies to access them, except for the cookie itself */ LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg) { // Create a node representing the local pointing to the base of the args GenTree* ptrArg = gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL), gtNewIconNode(varDsc->GetStackOffset() - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs)); // Access the argument through the local GenTree* tree; if (varTypeIsStruct(varType)) { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != nullptr); tree = gtNewObjNode(typeHnd, ptrArg); } else { tree = gtNewOperNode(GT_IND, varType, ptrArg); } tree->gtFlags |= GTF_IND_TGTANYWHERE; if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } return fgMorphTree(tree); } return NULL; } #endif /***************************************************************************** * * Transform the given GT_LCL_VAR tree for code generation. */ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) { assert(tree->gtOper == GT_LCL_VAR); unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); var_types varType = lvaGetRealType(lclNum); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 /* If not during the global morphing phase bail */ if (!fgGlobalMorph && !forceRemorph) { return tree; } bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0; noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr if (!varAddr && varDsc->lvNormalizeOnLoad()) { // TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL. // Now it does, but this leads to some regressions because we lose the uniform VNs for trees // that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created // here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)). // This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer. // This quirk preserves the previous behavior. // TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk. bool isBoolQuirk = varType == TYP_BOOL; // Assertion prop can tell us to omit adding a cast here. This is // useful when the local is a small-typed parameter that is passed in a // register: in that case, the ABI specifies that the upper bits might // be invalid, but the assertion guarantees us that we have normalized // when we wrote it. if (optLocalAssertionProp && !isBoolQuirk && optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX) { // The previous assertion can guarantee us that if this node gets // assigned a register, it will be normalized already. It is still // possible that this node ends up being in memory, in which case // normalization will still be needed, so we better have the right // type. 
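                // (Illustration: for a small-typed local V00 of type short, the normalized
                // load built further below is CAST(short <- LCL_VAR(int V00)); when assertion
                // propagation proves the value is already normalized, we return the bare
                // LCL_VAR here and skip that cast.)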
assert(tree->TypeGet() == varDsc->TypeGet()); return tree; } // Small-typed arguments and aliased locals are normalized on load. // Other small-typed locals are normalized on store. // Also, under the debugger as the debugger could write to the variable. // If this is one of the former, insert a narrowing cast on the load. // ie. Convert: var-short --> cast-short(var-int) tree->gtType = TYP_INT; fgMorphTreeDone(tree); tree = gtNewCastNode(TYP_INT, tree, false, varType); fgMorphTreeDone(tree); return tree; } return tree; } /***************************************************************************** Grab a temp for big offset morphing. This method will grab a new temp if no temp of this "type" has been created. Or it will return the same cached one if it has been created. */ unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type) { unsigned lclNum = fgBigOffsetMorphingTemps[type]; if (lclNum == BAD_VAR_NUM) { // We haven't created a temp for this kind of type. Create one now. lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing")); fgBigOffsetMorphingTemps[type] = lclNum; } else { // We better get the right type. noway_assert(lvaTable[lclNum].TypeGet() == type); } noway_assert(lclNum != BAD_VAR_NUM); return lclNum; } /***************************************************************************** * * Transform the given GT_FIELD tree for code generation. */ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { assert(tree->gtOper == GT_FIELD); CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; unsigned fldOffset = tree->AsField()->gtFldOffset; GenTree* objRef = tree->AsField()->GetFldObj(); bool fieldMayOverlap = false; bool objIsLocal = false; if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR)) { // Make sure we've checked if 'objRef' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs may change it do a different opcode, which the // simd field rewrites are sensitive to. fgMorphImplicitByRefArgs(objRef); } noway_assert(((objRef != nullptr) && ((objRef->IsLocalAddrExpr() != nullptr) || (objRef->IsImplicitByrefParameterValue(this) != nullptr))) || (tree->gtFlags & GTF_GLOB_REF) != 0); if (tree->AsField()->gtFldMayOverlap) { fieldMayOverlap = true; // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; } #ifdef FEATURE_SIMD // if this field belongs to simd struct, translate it to simd intrinsic. if (mac == nullptr) { if (IsBaselineSimdIsaSupported()) { GenTree* newTree = fgMorphFieldToSimdGetElement(tree); if (newTree != tree) { newTree = fgMorphTree(newTree); return newTree; } } } else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1())) { GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr(); if (lcl != nullptr) { lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); } } #endif // Create a default MorphAddrContext early so it doesn't go out of scope // before it is used. MorphAddrContext defMAC(MACK_Ind); /* Is this an instance data member? 
*/ if (objRef) { GenTree* addr; objIsLocal = objRef->IsLocal(); if (tree->gtFlags & GTF_IND_TLS_REF) { NO_WAY("instance field can not be a TLS ref."); } /* We'll create the expression "*(objRef + mem_offs)" */ noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL); /* Now we have a tree like this: +--------------------+ | GT_FIELD | tree +----------+---------+ | +--------------+-------------+ |tree->AsField()->GetFldObj()| +--------------+-------------+ We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +---------+----------+ | | +---------+----------+ | GT_ADD | addr +---------+----------+ | / \ / \ / \ +-------------------+ +----------------------+ | objRef | | fldOffset | | | | (when fldOffset !=0) | +-------------------+ +----------------------+ or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +----------+---------+ | +----------+---------+ | GT_COMMA | comma2 +----------+---------+ | / \ / \ / \ / \ +---------+----------+ +---------+----------+ comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr +---------+----------+ +---------+----------+ | | / \ / \ / \ / \ / \ / \ +-----+-----+ +-----+-----+ +---------+ +-----------+ asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset | +-----+-----+ +-----+-----+ +---------+ +-----------+ | | / \ | / \ | / \ | +-----+-----+ +-----+-----+ +-----------+ | tmpLcl | | objRef | | tmpLcl | +-----------+ +-----------+ +-----------+ */ var_types objRefType = objRef->TypeGet(); GenTree* comma = nullptr; // NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field, // and thus is equivalent to a MACK_Ind with zero offset. if (mac == nullptr) { mac = &defMAC; } // This flag is set to enable the "conservative" style of explicit null-check insertion. // This means that we insert an explicit null check whenever we create byref by adding a // constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately // dereferenced). The alternative is "aggressive", which would not insert such checks (for // small offsets); in this plan, we would transfer some null-checking responsibility to // callee's of methods taking byref parameters. They would have to add explicit null checks // when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in // contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too // large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null // checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs. // This is left here to point out how to implement it. CLANG_FORMAT_COMMENT_ANCHOR; #define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1 bool addExplicitNullCheck = false; // Implicit byref locals and string literals are never null. if (fgAddrCouldBeNull(objRef)) { // If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression // whose address is being taken is either a local or static variable, whose address is necessarily // non-null, or else it is a field dereference, which will do its own bounds checking if necessary. 
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind)) { if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset)) { addExplicitNullCheck = true; } else { // In R2R mode the field offset for some fields may change when the code // is loaded. So we can't rely on a zero offset here to suppress the null check. // // See GitHub issue #16454. bool fieldHasChangeableOffset = false; #ifdef FEATURE_READYTORUN fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr); #endif #if CONSERVATIVE_NULL_CHECK_BYREF_CREATION addExplicitNullCheck = (mac->m_kind == MACK_Addr) && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset); #else addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset)); #endif } } } if (addExplicitNullCheck) { #ifdef DEBUG if (verbose) { printf("Before explicit null check morphing:\n"); gtDispTree(tree); } #endif // // Create the "comma" subtree // GenTree* asg = nullptr; GenTree* nullchk; unsigned lclNum; if (objRef->gtOper != GT_LCL_VAR) { lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet())); // Create the "asg" node asg = gtNewTempAssign(lclNum, objRef); } else { lclNum = objRef->AsLclVarCommon()->GetLclNum(); } GenTree* lclVar = gtNewLclvNode(lclNum, objRefType); nullchk = gtNewNullCheck(lclVar, compCurBB); nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections if (asg) { // Create the "comma" node. comma = gtNewOperNode(GT_COMMA, TYP_VOID, // We don't want to return anything from this "comma" node. // Set the type to TYP_VOID, so we can select "cmp" instruction // instead of "mov" instruction later on. asg, nullchk); } else { comma = nullchk; } addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node. } else { addr = objRef; } #ifdef FEATURE_READYTORUN if (tree->AsField()->gtFieldLookup.addr != nullptr) { GenTree* offsetNode = nullptr; if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE) { offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr, GTF_ICON_CONST_PTR, true); #ifdef DEBUG offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd; #endif } else { noway_assert(!"unexpected accessType for R2R field access"); } var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF; addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif if (fldOffset != 0) { // Generate the "addr" node. /* Add the member offset to the object's address */ FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq)); } // Now let's set the "tree" as a GT_IND tree. tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; tree->SetIndirExceptionFlags(this); if (addExplicitNullCheck) { // // Create "comma2" node and link it to "tree". // GenTree* comma2; comma2 = gtNewOperNode(GT_COMMA, addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node. 
                                   comma, addr);
            tree->AsOp()->gtOp1 = comma2;
        }

#ifdef DEBUG
        if (verbose)
        {
            if (addExplicitNullCheck)
            {
                printf("After adding explicit null check:\n");
                gtDispTree(tree);
            }
        }
#endif
    }
    else /* This is a static data member */
    {
        if (tree->gtFlags & GTF_IND_TLS_REF)
        {
            // Thread Local Storage static field reference
            //
            // Field ref is a TLS 'Thread-Local-Storage' reference
            //
            // Build this tree:  IND(*) #
            //                    |
            //                   ADD(I_IMPL)
            //                   / \.
            //                  /  CNS(fldOffset)
            //                 /
            //                /
            //               /
            //             IND(I_IMPL) == [Base of this DLL's TLS]
            //              |
            //             ADD(I_IMPL)
            //             / \.
            //            /   CNS(IdValue*4) or MUL
            //           /       / \.
            //          IND(I_IMPL) / CNS(4)
            //           |         /
            //          CNS(TLS_HDL,0x2C)  IND
            //                              |
            //                             CNS(pIdAddr)
            //
            //  # Denotes the original node
            //
            void**   pIdAddr = nullptr;
            unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);

            //
            // If we can access the TLS DLL index ID value directly
            // then pIdAddr will be NULL and
            // IdValue will be the actual TLS DLL index ID.
            //
            GenTree* dllRef = nullptr;
            if (pIdAddr == nullptr)
            {
                if (IdValue != 0)
                {
                    dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
                }
            }
            else
            {
                dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true);

                // Next we multiply by 4.
                dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
            }

#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides

            // Mark this ICON as a TLS_HDL; codegen will use FS:[cns].
            GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL);

            // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS.
            if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0)
            {
                tree->gtFlags &= ~GTF_FLD_INITCLASS;
                tlsRef->gtFlags |= GTF_ICON_INITCLASS;
            }

            tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);

            if (dllRef != nullptr)
            {
                /* Add the dllRef */
                tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
            }

            /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
            tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);

            if (fldOffset != 0)
            {
                FieldSeqNode* fieldSeq =
                    fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
                GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);

                /* Add the TLS static field offset to the address */
                tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
            }

            // Final indirect to get to the actual value of the TLS static field.
            tree->SetOper(GT_IND);
            tree->AsOp()->gtOp1 = tlsRef;

            noway_assert(tree->gtFlags & GTF_IND_TLS_REF);
        }
        else
        {
            assert(!fieldMayOverlap);

            // Normal static field reference
            //
            // If we can access the static's address directly
            // then pFldAddr will be NULL and
            // fldAddr will be the actual address of the static field.
            //
            void** pFldAddr = nullptr;
            void*  fldAddr  = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);

            // We should always be able to access this static field address directly.
            //
            assert(pFldAddr == nullptr);

            // For boxed statics, this direct address will be for the box. We have already added
            // the indirection for the field itself and attached the sequence, in importation.
            bool          isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd);
            FieldSeqNode* fldSeq =
                !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(symHnd) : FieldSeqStore::NotAField();

            // TODO-CQ: enable this optimization for 32 bit targets.
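            // (The optimization referred to above: on 64-bit targets, a TYP_REF static whose
            // owning class is already initialized can be treated as an invariant, non-null
            // load; see how isStaticReadOnlyInited is computed and used just below.)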
bool isStaticReadOnlyInited = false; #ifdef TARGET_64BIT if (tree->TypeIs(TYP_REF) && !isBoxedStatic) { bool pIsSpeculative = true; if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE) { isStaticReadOnlyInited = !pIsSpeculative; } } #endif // TARGET_64BIT // TODO: choices made below have mostly historical reasons and // should be unified to always use the IND(<address>) form. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT bool preferIndir = isBoxedStatic || isStaticReadOnlyInited || (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr)); #else // !TARGET_64BIT bool preferIndir = isBoxedStatic; #endif // !TARGET_64BIT if (preferIndir) { GenTreeFlags handleKind = GTF_EMPTY; if (isBoxedStatic) { handleKind = GTF_ICON_STATIC_BOX_PTR; } else if (isStaticReadOnlyInited) { handleKind = GTF_ICON_CONST_PTR; } else { handleKind = GTF_ICON_STATIC_HDL; } GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fldSeq); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to. if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited) { tree->gtFlags &= ~GTF_FLD_INITCLASS; addr->gtFlags |= GTF_ICON_INITCLASS; } tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; if (isBoxedStatic) { // The box for the static cannot be null, and is logically invariant, since it // represents (a base for) the static's address. tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } else if (isStaticReadOnlyInited) { JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd)); // Static readonly field is not null at this point (see getStaticFieldCurrentClass impl). tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } return fgMorphSmpOp(tree); } else { // Only volatile or classinit could be set, and they map over noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0); static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE); static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS); tree->SetOper(GT_CLS_VAR); tree->AsClsVar()->gtClsVarHnd = symHnd; tree->AsClsVar()->gtFieldSeq = fldSeq; } return tree; } } noway_assert(tree->gtOper == GT_IND); if (fldOffset == 0) { GenTree* addr = tree->AsOp()->gtOp1; // 'addr' may be a GT_COMMA. Skip over any comma nodes addr = addr->gtEffectiveVal(); #ifdef DEBUG if (verbose) { printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n"); gtDispTree(tree); } #endif // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node. FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); fgAddFieldSeqForZeroOffset(addr, fieldSeq); } // Pass down the current mac; if non null we are computing an address GenTree* result = fgMorphSmpOp(tree, mac); #ifdef DEBUG if (verbose) { printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n"); gtDispTree(result); } #endif return result; } //------------------------------------------------------------------------------ // fgMorphCallInline: attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // inlineResult - result tracking and reporting // // Notes: // Attempts to inline the call. 
// // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult) { bool inliningFailed = false; // Is this call an inline candidate? if (call->IsInlineCandidate()) { InlineContext* createdContext = nullptr; // Attempt the inline fgMorphCallInlineHelper(call, inlineResult, &createdContext); // We should have made up our minds one way or another.... assert(inlineResult->IsDecided()); // If we failed to inline, we have a bit of work to do to cleanup if (inlineResult->IsFailure()) { if (createdContext != nullptr) { // We created a context before we got to the failure, so mark // it as failed in the tree. createdContext->SetFailed(inlineResult); } else { #ifdef DEBUG // In debug we always put all inline attempts into the inline tree. InlineContext* ctx = m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call); ctx->SetFailed(inlineResult); #endif } inliningFailed = true; // Clear the Inline Candidate flag so we can ensure later we tried // inlining all candidates. // call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE; } } else { // This wasn't an inline candidate. So it must be a GDV candidate. assert(call->IsGuardedDevirtualizationCandidate()); // We already know we can't inline this call, so don't even bother to try. inliningFailed = true; } // If we failed to inline (or didn't even try), do some cleanup. if (inliningFailed) { if (call->gtReturnType != TYP_VOID) { JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID()); // Detach the GT_CALL tree from the original statement by // hanging a "nothing" node to it. Later the "nothing" node will be removed // and the original GT_CALL tree will be picked up by the GT_RET_EXPR node. noway_assert(fgMorphStmt->GetRootNode() == call); fgMorphStmt->SetRootNode(gtNewNothingNode()); } } } //------------------------------------------------------------------------------ // fgMorphCallInlineHelper: Helper to attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // result - result to set to success or failure // createdContext - The context that was created if the inline attempt got to the inliner. // // Notes: // Attempts to inline the call. // // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. // // If a context was created because we got to the importer then it is output by this function. // If the inline succeeded, this context will already be marked as successful. If it failed and // a context is returned, then it will not have been marked as success or failed. void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext) { // Don't expect any surprises here. assert(result->IsCandidate()); if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING) { // For now, attributing this to call site, though it's really // more of a budget issue (lvaCount currently includes all // caller and prospective callee locals). We still might be // able to inline other callees into this caller, or inline // this callee in other callers. 
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } if (call->IsVirtual()) { result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL); return; } // Re-check this because guarded devirtualization may allow these through. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } // impMarkInlineCandidate() is expected not to mark tail prefixed calls // and recursive tail calls as inline candidates. noway_assert(!call->IsTailPrefixedCall()); noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call)); // // Calling inlinee's compiler to inline the method. // unsigned startVars = lvaCount; #ifdef DEBUG if (verbose) { printf("Expanding INLINE_CANDIDATE in statement "); printStmtID(fgMorphStmt); printf(" in " FMT_BB ":\n", compCurBB->bbNum); gtDispStmt(fgMorphStmt); if (call->IsImplicitTailCall()) { printf("Note: candidate is implicit tail call\n"); } } #endif impInlineRoot()->m_inlineStrategy->NoteAttempt(result); // // Invoke the compiler to inline the call. // fgInvokeInlineeCompiler(call, result, createdContext); if (result->IsFailure()) { // Undo some changes made in anticipation of inlining... // Zero out the used locals memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable)); for (unsigned i = startVars; i < lvaCount; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } lvaCount = startVars; #ifdef DEBUG if (verbose) { // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount); } #endif return; } #ifdef DEBUG if (verbose) { // printf("After inlining lvaCount=%d.\n", lvaCount); } #endif } //------------------------------------------------------------------------ // fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp. // // Arguments: // callee - The callee to check // failReason - If this method returns false, the reason why. Can be nullptr. // // Return Value: // Returns true or false based on whether the callee can be fastTailCalled // // Notes: // This function is target specific and each target will make the fastTailCall // decision differently. See the notes below. // // This function calls fgInitArgInfo() to initialize the arg info table, which // is used to analyze the argument. This function can alter the call arguments // by adding argument IR nodes for non-standard arguments. // // Windows Amd64: // A fast tail call can be made whenever the number of callee arguments // is less than or equal to the number of caller arguments, or we have four // or fewer callee arguments. This is because, on Windows AMD64, each // argument uses exactly one register or one 8-byte stack slot. Thus, we only // need to count arguments, and not be concerned with the size of each // incoming or outgoing argument. 
// // Can fast tail call examples (amd64 Windows): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal or less than the caller -- // caller(struct, struct, struct, struct, struct, struct) // callee(int, int, int, int, int, int) // // -- Callee requires stack space that is less than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int) // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Windows): // // -- Callee requires stack space that is larger than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int, double, double, double) // // -- Callee has a byref struct argument -- // caller(int, int, int) // callee(struct(size 3 bytes)) // // Unix Amd64 && Arm64: // A fastTailCall decision can be made whenever the callee's stack space is // less than or equal to the caller's stack space. There are many permutations // of when the caller and callee have different stack sizes if there are // structs being passed to either the caller or callee. // // Exceptions: // If the callee has a 9 to 16 byte struct argument and the callee has // stack arguments, the decision will be to not fast tail call. This is // because before fgMorphArgs is done, the struct is unknown whether it // will be placed on the stack or enregistered. Therefore, the conservative // decision of do not fast tail call is taken. This limitations should be // removed if/when fgMorphArgs no longer depends on fgCanFastTailCall. // // Can fast tail call examples (amd64 Unix): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal to the caller -- // caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte // stack // space // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee requires stack space that is less than the caller -- // caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte // stack // space // callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Unix): // // -- Callee requires stack space that is larger than the caller -- // caller(float, float, float, float, float, float, float, float) -- 8 float register arguments // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee has structs which cannot be enregistered (Implementation Limitation) -- // caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register // arguments, 24 byte stack space // callee({ double, double, double }) -- 24 bytes stack space // // -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) -- // caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space // callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space // // -- Caller requires stack 
space and nCalleeArgs > nCallerArgs (Bug) -- // caller({ double, double, double, double, double, double }) // 48 byte stack // callee(int, int) -- 2 int registers bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) { #if FEATURE_FASTTAILCALL // To reach here means that the return types of the caller and callee are tail call compatible. // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (callee->IsTailPrefixedCall()) { var_types retType = info.compRetType; assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv, (var_types)callee->gtReturnType, callee->gtRetClsHnd, callee->GetUnmanagedCallConv())); } #endif assert(!callee->AreArgsComplete()); fgInitArgInfo(callee); fgArgInfo* argInfo = callee->fgArgInfo; unsigned calleeArgStackSize = 0; unsigned callerArgStackSize = info.compArgStackSize; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment()); calleeArgStackSize += arg->GetStackByteSize(); } calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize); auto reportFastTailCallDecision = [&](const char* thisFailReason) { if (failReason != nullptr) { *failReason = thisFailReason; } #ifdef DEBUG if ((JitConfig.JitReportFastTailCallDecisions()) == 1) { if (callee->gtCallType != CT_INDIRECT) { const char* methodName; methodName = eeGetMethodFullName(callee->gtCallMethHnd); printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ", info.compFullName, methodName); } else { printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- " "Decision: ", info.compFullName); } if (thisFailReason == nullptr) { printf("Will fast tailcall"); } else { printf("Will not fast tailcall (%s)", thisFailReason); } printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize); } else { if (thisFailReason == nullptr) { JITDUMP("[Fast tailcall decision]: Will fast tailcall\n"); } else { JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason); } } #endif // DEBUG }; if (!opts.compFastTailCalls) { reportFastTailCallDecision("Configuration doesn't allow fast tail calls"); return false; } if (callee->IsStressTailCall()) { reportFastTailCallDecision("Fast tail calls are not performed under tail call stress"); return false; } // Note on vararg methods: // If the caller is vararg method, we don't know the number of arguments passed by caller's caller. // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its // fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as // out-going area required for callee is bounded by caller's fixed argument space. // // Note that callee being a vararg method is not a problem since we can account the params being passed. // // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. 
CLANG_FORMAT_COMMENT_ANCHOR; if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } if (compLocallocUsed) { reportFastTailCallDecision("Localloc used"); return false; } #ifdef TARGET_AMD64 // Needed for Jit64 compat. // In future, enabling fast tail calls from methods that need GS cookie // check would require codegen side work to emit GS cookie check before a // tail call. if (getNeedsGSSecurityCookie()) { reportFastTailCallDecision("GS Security cookie check required"); return false; } #endif // If the NextCallReturnAddress intrinsic is used we should do normal calls. if (info.compHasNextCallRetAddr) { reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic"); return false; } if (callee->HasRetBufArg()) // RetBuf { // If callee has RetBuf param, caller too must have it. // Otherwise go the slow route. if (info.compRetBuffArg == BAD_VAR_NUM) { reportFastTailCallDecision("Callee has RetBuf but caller does not."); return false; } } // For a fast tail call the caller will use its incoming arg stack space to place // arguments, so if the callee requires more arg stack space than is available here // the fast tail call cannot be performed. This is common to all platforms. // Note that the GC'ness of on stack args need not match since the arg setup area is marked // as non-interruptible for fast tail calls. if (calleeArgStackSize > callerArgStackSize) { reportFastTailCallDecision("Not enough incoming arg space"); return false; } // For Windows some struct parameters are copied on the local frame // and then passed by reference. We cannot fast tail call in these situation // as we need to keep our frame around. if (fgCallHasMustCopyByrefParameter(callee)) { reportFastTailCallDecision("Callee has a byref parameter"); return false; } reportFastTailCallDecision(nullptr); return true; #else // FEATURE_FASTTAILCALL if (failReason) *failReason = "Fast tailcalls are not supported on this platform"; return false; #endif } //------------------------------------------------------------------------ // fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that // requires a struct copy in the caller. // // Arguments: // callee - The callee to check // // Return Value: // Returns true or false based on whether this call has a byref parameter that // requires a struct copy in the caller. #if FEATURE_FASTTAILCALL bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee) { fgArgInfo* argInfo = callee->fgArgInfo; bool hasMustCopyByrefParameter = false; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); if (arg->isStruct) { if (arg->passedByRef) { // Generally a byref arg will block tail calling, as we have to // make a local copy of the struct for the callee. hasMustCopyByrefParameter = true; // If we're optimizing, we may be able to pass our caller's byref to our callee, // and so still be able to avoid a struct copy. if (opts.OptimizationEnabled()) { // First, see if this arg is an implicit byref param. GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { // Yes, the arg is an implicit byref param. 
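                    // (Overview of the analysis that follows: we try to prove that no other
                    // call argument, and no exposed address within the method, can alias this
                    // unpromoted implicit byref; if so, the caller's copy can be elided and
                    // the fast tail call may still be possible.)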
const unsigned lclNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lcl); // The param must not be promoted; if we've promoted, then the arg will be // a local struct assembled from the promoted fields. if (varDsc->lvPromoted) { JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n", dspTreeID(arg->GetNode()), lclNum); } else { JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n", dspTreeID(arg->GetNode()), lclNum); // We have to worry about introducing aliases if we bypass copying // the struct at the call. We'll do some limited analysis to see if we // can rule this out. const unsigned argLimit = 6; // If this is the only appearance of the byref in the method, then // aliasing is not possible. // // If no other call arg refers to this byref, and no other arg is // a pointer which could refer to this byref, we can optimize. // // We only check this for calls with small numbers of arguments, // as the analysis cost will be quadratic. // const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); if (totalAppearances == 1) { JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum); hasMustCopyByrefParameter = false; } else if (totalAppearances > callAppearances) { // lvRefCntWtd tracks the number of appearances of the arg at call sites. // If this number doesn't match the regular ref count, there is // a non-call appearance, and we must be conservative. // JITDUMP("... no, arg has %u non-call appearance(s)\n", totalAppearances - callAppearances); } else if (argInfo->ArgCount() <= argLimit) { JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n" "... Running alias analysis on this call's args\n", totalAppearances); GenTree* interferingArg = nullptr; for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2) { if (index2 == index) { continue; } fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false); JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode())); DISPTREE(arg2->GetNode()); // Do we pass 'lcl' more than once to the callee? if (arg2->isStruct && arg2->passedByRef) { GenTreeLclVarCommon* const lcl2 = arg2->GetNode()->IsImplicitByrefParameterValue(this); if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum())) { // not copying would introduce aliased implicit byref structs // in the callee ... we can't optimize. interferingArg = arg2->GetNode(); break; } else { JITDUMP("... arg refers to different implicit byref V%02u\n", lcl2->GetLclNum()); continue; } } // Do we pass a byref pointer which might point within 'lcl'? // // We can assume the 'lcl' is unaliased on entry to the // method, so the only way we can have an aliasing byref pointer at // the call is if 'lcl' is address taken/exposed in the method. // // Note even though 'lcl' is not promoted, we are in the middle // of the promote->rewrite->undo->(morph)->demote cycle, and so // might see references to promoted fields of 'lcl' that haven't yet // been demoted (see fgMarkDemotedImplicitByRefArgs). // // So, we also need to scan all 'lcl's fields, if any, to see if they // are exposed. // // When looking for aliases from other args, we check for both TYP_BYREF // and TYP_I_IMPL typed args here. 
Conceptually anything that points into // an implicit byref parameter should be TYP_BYREF, as these parameters could // refer to boxed heap locations (say if the method is invoked by reflection) // but there are some stack only structs (like typed references) where // the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will // transiently retype all simple address-of implicit parameter args as // TYP_I_IMPL. // if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL)) { JITDUMP("...arg is a byref, must run an alias check\n"); bool checkExposure = true; bool hasExposure = false; // See if there is any way arg could refer to a parameter struct. GenTree* arg2Node = arg2->GetNode(); if (arg2Node->OperIs(GT_LCL_VAR)) { GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon(); assert(arg2LclNode->GetLclNum() != lclNum); LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode); // Other params can't alias implicit byref params if (arg2Dsc->lvIsParam) { checkExposure = false; } } // Because we're checking TYP_I_IMPL above, at least // screen out obvious things that can't cause aliases. else if (arg2Node->IsIntegralConst()) { checkExposure = false; } if (checkExposure) { JITDUMP( "... not sure where byref arg points, checking if V%02u is exposed\n", lclNum); // arg2 might alias arg, see if we've exposed // arg somewhere in the method. if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed()) { // Struct as a whole is exposed, can't optimize JITDUMP("... V%02u is exposed\n", lclNum); hasExposure = true; } else if (varDsc->lvFieldLclStart != 0) { // This is the promoted/undone struct case. // // The field start is actually the local number of the promoted local, // use it to enumerate the fields. const unsigned promotedLcl = varDsc->lvFieldLclStart; LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl); JITDUMP("...promoted-unpromoted case -- also checking exposure of " "fields of V%02u\n", promotedLcl); for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt; fieldIndex++) { LclVarDsc* fieldDsc = lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex); if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed()) { // Promoted and not yet demoted field is exposed, can't optimize JITDUMP("... field V%02u is exposed\n", promotedVarDsc->lvFieldLclStart + fieldIndex); hasExposure = true; break; } } } } if (hasExposure) { interferingArg = arg2->GetNode(); break; } } else { JITDUMP("...arg is not a byref or implicit byref (%s)\n", varTypeName(arg2->GetNode()->TypeGet())); } } if (interferingArg != nullptr) { JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg), lclNum); } else { JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum); hasMustCopyByrefParameter = false; } } else { JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n", argInfo->ArgCount(), argLimit); } } } } if (hasMustCopyByrefParameter) { // This arg requires a struct copy. No reason to keep scanning the remaining args. break; } } } } return hasMustCopyByrefParameter; } #endif //------------------------------------------------------------------------ // fgMorphPotentialTailCall: Attempt to morph a call that the importer has // identified as a potential tailcall to an actual tailcall and return the // placeholder node to use in this case. // // Arguments: // call - The call to morph. // // Return Value: // Returns a node to use if the call was morphed into a tailcall. 
If this // function returns a node the call is done being morphed and the new node // should be used. Otherwise the call will have been demoted to a regular call // and should go through normal morph. // // Notes: // This is called only for calls that the importer has already identified as // potential tailcalls. It will do profitability and legality checks and // classify which kind of tailcall we are able to (or should) do, along with // modifying the trees to perform that kind of tailcall. // GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // It should either be an explicit (i.e. tail prefixed) or an implicit tail call assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall()); // It cannot be an inline candidate assert(!call->IsInlineCandidate()); auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) { #ifdef DEBUG if (verbose) { printf("\nRejecting tail call in morph for call "); printTreeID(call); printf(": %s", reason); if (lclNum != BAD_VAR_NUM) { printf(" V%02u", lclNum); } printf("\n"); } #endif // for non user funcs, we have no handles to report info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), TAILCALL_FAIL, reason); // We have checked the candidate so demote. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif }; if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { failTailCall("Might turn into an intrinsic"); return nullptr; } if (call->IsNoReturn() && !call->IsTailPrefixedCall()) { // Such tail calls always throw an exception and we won't be able to see current // Caller() in the stacktrace. failTailCall("Never returns"); return nullptr; } #ifdef DEBUG if (opts.compGcChecks && (info.compRetType == TYP_REF)) { failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, " "invalidating tailcall opportunity"); return nullptr; } #endif // We have to ensure to pass the incoming retValBuf as the // outgoing one. Using a temp will not do as this function will // not regain control to do the copy. This can happen when inlining // a tailcall which also has a potential tailcall in it: the IL looks // like we can do a tailcall, but the trees generated use a temp for the inlinee's // result. TODO-CQ: Fix this. if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(call->TypeGet() == TYP_VOID); GenTree* retValBuf = call->gtCallArgs->GetNode(); if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg) { failTailCall("Need to copy return buffer"); return nullptr; } } // We are still not sure whether it can be a tail call. Because, when converting // a call to an implicit tail call, we must check that there are no locals with // their address taken. If this is the case, we have to assume that the address // has been leaked and the current stack frame must live until after the final // call. // Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note // that lvHasLdAddrOp is much more conservative. We cannot just base it on // IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs // during morph stage. The reason for also checking IsAddressExposed() is that in case // of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp. 
// The combination of lvHasLdAddrOp and IsAddressExposed(), though conservative, allows us // never to be incorrect. // // TODO-Throughput: have a compiler level flag to indicate whether the method has vars whose // address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed() // is set. This avoids the need for iterating through all lcl vars of the current // method. Right now throughout the code base we are not consistently using the 'set' // method to set the lvHasLdAddrOp and IsAddressExposed() flags. bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall(); if (isImplicitOrStressTailCall && compLocallocUsed) { failTailCall("Localloc used"); return nullptr; } bool hasStructParam = false; for (unsigned varNum = 0; varNum < lvaCount; varNum++) { LclVarDsc* varDsc = lvaGetDesc(varNum); // If the method is marked as an explicit tail call we will skip the // following three hazard checks. // We still must check for any struct parameters and set 'hasStructParam' // so that we won't transform the recursive tail call into a loop. // if (isImplicitOrStressTailCall) { if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Local address taken", varNum); return nullptr; } if (varDsc->IsAddressExposed()) { if (lvaIsImplicitByRefLocal(varNum)) { // The address of the implicit-byref is a non-address use of the pointer parameter. } else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) { // The address of the implicit-byref's field is likewise a non-address use of the pointer // parameter. } else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum)) { // This temp was used for struct promotion bookkeeping. It will not be used, and will have // its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs. assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl)); assert(fgGlobalMorph); } else { failTailCall("Local address taken", varNum); return nullptr; } } if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Has Struct Promoted Param", varNum); return nullptr; } if (varDsc->lvPinned) { // A tail call removes the method from the stack, which means the pinning // goes away for the callee. We can't allow that. failTailCall("Has Pinned Vars", varNum); return nullptr; } } if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam) { hasStructParam = true; // This prevents transforming a recursive tail call into a loop // but doesn't prevent tail call optimization so we need to // look at the rest of the parameters. } } if (!fgCheckStmtAfterTailCall()) { failTailCall("Unexpected statements after the tail call"); return nullptr; } const char* failReason = nullptr; bool canFastTailCall = fgCanFastTailCall(call, &failReason); CORINFO_TAILCALL_HELPERS tailCallHelpers; bool tailCallViaJitHelper = false; if (!canFastTailCall) { if (call->IsImplicitTailCall()) { // Implicit or opportunistic tail calls are always dispatched via the fast tail call // mechanism and never via the tail call helper, for perf. failTailCall(failReason); return nullptr; } assert(call->IsTailPrefixedCall()); assert(call->tailCallInfo != nullptr); // We do not currently handle non-standard args except for VSD stubs.
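// (For illustration, a sketch: on x64 a VSD call carries its stub address as a non-standard // arg in a fixed register (R11, see fgGetStubAddrArg below); such late-added args do not map // to IL parameters, which is why the general helper path does not handle them.)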
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this)) { failTailCall( "Method with non-standard args passed in callee trash register cannot be tail called via helper"); return nullptr; } // On x86 we have a faster mechanism than the general one which we use // in almost all cases. See fgCanTailCallViaJitHelper for more information. if (fgCanTailCallViaJitHelper()) { tailCallViaJitHelper = true; } else { // Make sure we can get the helpers. We do this last as the runtime // will likely be required to generate these. CORINFO_RESOLVED_TOKEN* token = nullptr; CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig(); unsigned flags = 0; if (!call->tailCallInfo->IsCalli()) { token = call->tailCallInfo->GetToken(); if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_TAILCALL_IS_CALLVIRT; } } if (call->gtCallThisArg != nullptr) { var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet(); if (thisArgType != TYP_REF) { flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF; } } if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags, &tailCallHelpers)) { failTailCall("Tail call help not available"); return nullptr; } } } // Check if we can make the tailcall a loop. bool fastTailCallToLoop = false; #if FEATURE_TAILCALL_OPT // TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register // or the return type is a struct that can be passed in a register. // // TODO-CQ: if the method being compiled requires a generic context reported in gc-info (either through // a hidden generic context param or through a keep-alive thisptr), then transforming a recursive // call to such a method requires that the generic context stored on the stack slot be updated. Right now, // fgMorphRecursiveFastTailCallIntoLoop() is not handling update of the generic context while transforming // a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the // generic type parameters of both the caller and callee generic method are the same. if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() && !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet())) { fastTailCallToLoop = true; } #endif // Ok -- now we are committed to performing a tailcall. Report the decision. CorInfoTailCall tailCallResult; if (fastTailCallToLoop) { tailCallResult = TAILCALL_RECURSIVE; } else if (canFastTailCall) { tailCallResult = TAILCALL_OPTIMIZED; } else { tailCallResult = TAILCALL_HELPER; } info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), tailCallResult, nullptr); // Are we currently planning to expand the gtControlExpr as an early virtual call target? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // It isn't always profitable to expand a virtual call early // // We always expand the TAILCALL_HELPER type late. // And we expand late when we have an optimized tail call // and the this pointer needs to be evaluated into a temp. // if (tailCallResult == TAILCALL_HELPER) { // We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work // for us to be able to expand this earlier in morph) // call->ClearExpandedEarly(); } else if ((tailCallResult == TAILCALL_OPTIMIZED) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0)) { // We generate better code when we expand this late in lower instead. // call->ClearExpandedEarly(); } } // Now actually morph the call. compTailCallUsed = true; // This will prevent inlining this call. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL; if (tailCallViaJitHelper) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; } #if FEATURE_TAILCALL_OPT if (fastTailCallToLoop) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP; } #endif // Mark that this is no longer a pending tailcall. We need to do this before // we call fgMorphCall again (which happens in the fast tailcall case) to // avoid recursing back into this method. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif #ifdef DEBUG if (verbose) { printf("\nGTF_CALL_M_TAILCALL bit set for call "); printTreeID(call); printf("\n"); if (fastTailCallToLoop) { printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call "); printTreeID(call); printf("\n"); } } #endif // For R2R we might need a different entry point for this call if we are doing a tailcall. // The reason is that the normal delay load helper uses the return address to find the indirection // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM: // We optimize delegate invocations manually in the JIT so skip this for those. if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke()) { info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint); #ifdef TARGET_XARCH // We have already computed arg info to make the fast tailcall decision, but on X64 we now // have to pass the indirection cell, so redo arg info. call->ResetArgInfo(); #endif } // If this block has a flow successor, make suitable updates. // BasicBlock* const nextBlock = compCurBB->GetUniqueSucc(); if (nextBlock == nullptr) { // No unique successor. compCurBB should be a return. // assert(compCurBB->bbJumpKind == BBJ_RETURN); } else { // Flow no longer reaches nextBlock from here. // fgRemoveRefPred(nextBlock, compCurBB); // Adjust profile weights. // // Note if this is a tail call to loop, further updates // are needed once we install the loop edge. // if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight()) { // Since we have linear flow we can update the next block weight. // weight_t const blockWeight = compCurBB->bbWeight; weight_t const nextWeight = nextBlock->bbWeight; weight_t const newNextWeight = nextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextWeight >= 0) { // Note if we'd already morphed the IR in nextblock we might // have done something profile sensitive that we should arguably reconsider. 
// JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum, nextWeight, newNextWeight); nextBlock->setBBProfileWeight(newNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight); } // If nextBlock is not a BBJ_RETURN, it should have a unique successor that // is a BBJ_RETURN, as we allow a little bit of flow after a tail call. // if (nextBlock->bbJumpKind != BBJ_RETURN) { BasicBlock* retBlock = nextBlock->GetUniqueSucc(); // Check if we have a sequence of GT_ASG blocks where the same variable is assigned // to temp locals over and over. // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs. // // { GT_ASG(t_0, GT_CALL(...)) } // { GT_ASG(t_1, t0) } (with casts on rhs potentially) // ... // { GT_ASG(t_n, t_(n - 1)) } // { GT_RET t_n } // if (retBlock->bbJumpKind != BBJ_RETURN) { // Make sure the block has a single statement assert(nextBlock->firstStmt() == nextBlock->lastStmt()); // And the root node is "ASG(LCL_VAR, LCL_VAR)" GenTree* asgNode = nextBlock->firstStmt()->GetRootNode(); assert(asgNode->OperIs(GT_ASG)); unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); while (retBlock->bbJumpKind != BBJ_RETURN) { #ifdef DEBUG Statement* nonEmptyStmt = nullptr; for (Statement* const stmt : retBlock->Statements()) { // Ignore NOP statements if (!stmt->GetRootNode()->OperIs(GT_NOP)) { // Only a single non-NOP statement is allowed assert(nonEmptyStmt == nullptr); nonEmptyStmt = stmt; } } if (nonEmptyStmt != nullptr) { asgNode = nonEmptyStmt->GetRootNode(); if (!asgNode->OperIs(GT_NOP)) { assert(asgNode->OperIs(GT_ASG)); GenTree* rhs = asgNode->gtGetOp2(); while (rhs->OperIs(GT_CAST)) { assert(!rhs->gtOverflow()); rhs = rhs->gtGetOp1(); } assert(lcl == rhs->AsLclVarCommon()->GetLclNum()); lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); } } #endif retBlock = retBlock->GetUniqueSucc(); } } assert(retBlock->bbJumpKind == BBJ_RETURN); if (retBlock->hasProfileWeight()) { // Do similar updates here. // weight_t const nextNextWeight = retBlock->bbWeight; weight_t const newNextNextWeight = nextNextWeight - blockWeight; // If the math would result in an negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextNextWeight >= 0) { JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", retBlock->bbNum, nextNextWeight, newNextNextWeight); retBlock->setBBProfileWeight(newNextNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight); } } } } } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // We enable shared-ret tail call optimization for recursive calls even if // FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined. if (gtIsRecursiveCall(call)) #endif { // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. compCurBB->bbJumpKind = BBJ_RETURN; } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); #ifdef DEBUG // Tail call needs to be in one of the following IR forms // Either a call stmt or // GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..))) // var = GT_CALL(..) 
or var = (GT_CAST(GT_CALL(..))) // GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP) // In the above, // GT_CASTS may be nested. genTreeOps stmtOper = stmtExpr->gtOper; if (stmtOper == GT_CALL) { assert(stmtExpr == call); } else { assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA); GenTree* treeWithCall; if (stmtOper == GT_RETURN) { treeWithCall = stmtExpr->gtGetOp1(); } else if (stmtOper == GT_COMMA) { // Second operation must be nop. assert(stmtExpr->gtGetOp2()->IsNothingNode()); treeWithCall = stmtExpr->gtGetOp1(); } else { treeWithCall = stmtExpr->gtGetOp2(); } // Peel off casts while (treeWithCall->gtOper == GT_CAST) { assert(!treeWithCall->gtOverflow()); treeWithCall = treeWithCall->gtGetOp1(); } assert(treeWithCall == call); } #endif // Store the call type for later to introduce the correct placeholder. var_types origCallType = call->TypeGet(); GenTree* result; if (!canFastTailCall && !tailCallViaJitHelper) { // For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular // calls with (to the JIT) regular control flow so we do not need to do // much special handling. result = fgMorphTailCallViaHelpers(call, tailCallHelpers); } else { // Otherwise we will transform into something that does not return. For // fast tailcalls a "jump" and for tailcall via JIT helper a call to a // JIT helper that does not return. So peel off everything after the // call. Statement* nextMorphStmt = fgMorphStmt->GetNextStmt(); JITDUMP("Remove all stmts after the call.\n"); while (nextMorphStmt != nullptr) { Statement* stmtToRemove = nextMorphStmt; nextMorphStmt = stmtToRemove->GetNextStmt(); fgRemoveStmt(compCurBB, stmtToRemove); } bool isRootReplaced = false; GenTree* root = fgMorphStmt->GetRootNode(); if (root != call) { JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call)); isRootReplaced = true; fgMorphStmt->SetRootNode(call); } // Avoid potential extra work for the return (for example, vzeroupper) call->gtType = TYP_VOID; // The runtime requires that we perform a null check on the `this` argument before // tail calling to a virtual dispatch stub. This requirement is a consequence of limitations // in the runtime's ability to map an AV to a NullReferenceException if // the AV occurs in a dispatch stub that has an unmanaged caller. if (call->IsVirtualStub()) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Do some target-specific transformations (before we process the args, // etc.) for the JIT helper case. if (tailCallViaJitHelper) { fgMorphTailCallViaJitHelper(call); // Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the // argument list, invalidating the argInfo. call->fgArgInfo = nullptr; } // Tail call via JIT helper: The VM can't use return address hijacking // if we're not going to return and the helper doesn't have enough info // to safely poll, so we poll before the tail call, if the block isn't // already safe. Since tail call via helper is a slow mechanism it // doesn't matter whether we emit a GC poll. This is done to be in parity // with Jit64. Also this avoids GC info size increase if almost all // methods are expected to be tail calls (e.g. F#). // // Note that we can avoid emitting GC-poll if we know that the current // BB is dominated by a GC-SafePoint block. But we don't have dominator // info at this point. One option is to just add a placeholder node for // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block // is dominated by a GC-SafePoint.
For now it is not clear whether // optimizing slow tail calls is worth the effort. As a low cost check, // we check whether the first and current basic blocks are // GC-SafePoints. // // Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead, // fgSetBlockOrder() is going to mark the method as fully interruptible // if the block containing this tail call is reachable without executing // any call. BasicBlock* curBlock = compCurBB; if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock)) { // We didn't insert a poll block, so we need to morph the call now // (Normally it will get morphed when we get to the split poll block) GenTree* temp = fgMorphCall(call); noway_assert(temp == call); } // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with the BBF_HAS_JMP flag set. noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; } else { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need an epilogue. compCurBB->bbJumpKind = BBJ_THROW; } if (isRootReplaced) { // We have replaced the root node of this stmt and deleted the rest, // but we still have the deleted, dead nodes on the `fgMorph*` stack // if the root node was an `ASG`, `RET` or `CAST`. // Return a zero con node to exit morphing of the old trees without asserts // and forbid POST_ORDER morphing doing something wrong with our call. var_types callType; if (varTypeIsStruct(origCallType)) { CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference)); if (howToReturnStruct == SPK_ByValue) { callType = TYP_I_IMPL; } else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType)) { callType = TYP_FLOAT; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); } else { callType = origCallType; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); callType = genActualType(callType); GenTree* zero = gtNewZeroConNode(callType); result = fgMorphTree(zero); } else { result = call; } } return result; } //------------------------------------------------------------------------ // fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code // generation. // // Arguments: // call - The call to transform // helpers - The tailcall helpers provided by the runtime. // // Return Value: // Returns the transformed node. // // Notes: // This transforms // GT_CALL // {callTarget} // {this} // {args} // into // GT_COMMA // GT_CALL StoreArgsStub // {callTarget} (depending on flags provided by the runtime) // {this} (as a regular arg) // {args} // GT_COMMA // GT_CALL Dispatcher // GT_ADDR ReturnAddress // {CallTargetStub} // GT_ADDR ReturnValue // GT_LCL ReturnValue // whenever the call node returns a value. If the call node does not return a // value the last comma will not be there. // GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help) { // R2R requires different handling but we don't support tailcall via // helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper assert(!opts.IsReadyToRun()); JITDUMP("fgMorphTailCallViaHelpers (before):\n"); DISPTREE(call); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // We might or might not have called fgInitArgInfo before this point: in // builds with FEATURE_FASTTAILCALL we will have called it when checking if // we could do a fast tailcall, so it is possible we have added extra IR // for non-standard args that we must get rid of. Get rid of that IR here // and do this first as it will 'expose' the retbuf as the first arg, which // we rely upon in fgCreateCallDispatcherAndGetResult. call->ResetArgInfo(); GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher); // Change the call to a call to the StoreArgs stub. if (call->HasRetBufArg()) { JITDUMP("Removing retbuf"); call->gtCallArgs = call->gtCallArgs->GetNext(); call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG; } const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0; GenTree* doBeforeStoreArgsStub = nullptr; GenTree* thisPtrStubArg = nullptr; // Put 'this' in the normal param list if (call->gtCallThisArg != nullptr) { JITDUMP("Moving this pointer into arg list\n"); GenTree* objp = call->gtCallThisArg->GetNode(); GenTree* thisPtr = nullptr; call->gtCallThisArg = nullptr; // The JIT will need one or two copies of "this" in the following cases: // 1) the call needs a null check; // 2) the StoreArgs stub needs the target function pointer address and if the call is virtual // the stub also needs "this" in order to evaluate the target. const bool callNeedsNullCheck = call->NeedsNullCheck(); const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual(); // TODO-Review: The following transformation is implemented under the assumption that // both conditions can be true. However, I could not construct such an example // where a virtual tail call would require a null check. In case the conditions // are mutually exclusive, the following could be simplified. if (callNeedsNullCheck || stubNeedsThisPtr) { // Clone "this" if "this" has no side effects. if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0) { thisPtr = gtClone(objp, true); } // Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
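// (Sketch of the shapes built below in the spill case, using the temp created here: // doBeforeStoreArgsStub = COMMA(ASG(tmp, objp), NULLCHECK(tmp)) when a null check is needed, // thisPtr = LCL_VAR tmp, and a second LCL_VAR tmp serves as thisPtrStubArg when the stub // needs the target.)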
if (thisPtr == nullptr) { const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); if (callNeedsNullCheck) { // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet()); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck); } thisPtr = gtNewLclvNode(lclNum, objp->TypeGet()); if (stubNeedsThisPtr) { thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet()); } } else { if (callNeedsNullCheck) { // deref("this") doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB); if (stubNeedsThisPtr) { thisPtrStubArg = gtClone(objp, true); } } else { assert(stubNeedsThisPtr); thisPtrStubArg = objp; } } call->gtFlags &= ~GTF_CALL_NULLCHECK; assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr); } else { thisPtr = objp; } // During rationalization tmp="this" and null check will be materialized // in the right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // We may need to pass the target, for instance for calli or generic methods // where we pass instantiating stub. if (stubNeedsTargetFnPtr) { JITDUMP("Adding target since VM requested it\n"); GenTree* target; if (!call->IsVirtual()) { if (call->gtCallType == CT_INDIRECT) { noway_assert(call->gtCallAddr != nullptr); target = call->gtCallAddr; } else { CORINFO_CONST_LOOKUP addrInfo; info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo); CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE); if (addrInfo.accessType == IAT_VALUE) { handle = addrInfo.handle; } else if (addrInfo.accessType == IAT_PVALUE) { pIndirection = addrInfo.addr; } target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd); } } else { assert(!call->tailCallInfo->GetSig()->hasTypeArg()); CORINFO_CALL_INFO callInfo; unsigned flags = CORINFO_CALLINFO_LDFTN; if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_CALLINFO_CALLVIRT; } eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo); target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo); } // Insert target as last arg GenTreeCall::Use** newArgSlot = &call->gtCallArgs; while (*newArgSlot != nullptr) { newArgSlot = &(*newArgSlot)->NextRef(); } *newArgSlot = gtNewCallArgs(target); } // This is now a direct call to the store args stub and not a tailcall. call->gtCallType = CT_USER_FUNC; call->gtCallMethHnd = help.hStoreArgs; call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV); // The store-args stub returns no value. 
call->gtRetClsHnd = nullptr; call->gtType = TYP_VOID; call->gtReturnType = TYP_VOID; GenTree* callStoreArgsStub = call; if (doBeforeStoreArgsStub != nullptr) { callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub); } GenTree* finalTree = gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult); finalTree = fgMorphTree(finalTree); JITDUMP("fgMorphTailCallViaHelpers (after):\n"); DISPTREE(finalTree); return finalTree; } //------------------------------------------------------------------------ // fgCreateCallDispatcherAndGetResult: Given a call // CALL // {callTarget} // {retbuf} // {this} // {args} // create a similarly typed node that calls the tailcall dispatcher and returns // the result, as in the following: // COMMA // CALL TailCallDispatcher // ADDR ReturnAddress // &CallTargetFunc // ADDR RetValue // RetValue // If the call has type TYP_VOID, only create the CALL node. // // Arguments: // origCall - the call // callTargetStubHnd - the handle of the CallTarget function (this is a special // IL stub created by the runtime) // dispatcherHnd - the handle of the tailcall dispatcher function // // Return Value: // A node that can be used in place of the original call. // GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd) { GenTreeCall* callDispatcherNode = gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo()); // The dispatcher has signature // void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue) // Add return value arg. GenTree* retValArg; GenTree* retVal = nullptr; unsigned int newRetLcl = BAD_VAR_NUM; GenTree* copyToRetBufNode = nullptr; if (origCall->HasRetBufArg()) { JITDUMP("Transferring retbuf\n"); GenTree* retBufArg = origCall->gtCallArgs->GetNode(); assert(info.compRetBuffArg != BAD_VAR_NUM); assert(retBufArg->OperIsLocal()); assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg); // Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects // the return value argument retValArg to point to the stack. // We use a temporary stack allocated return buffer to hold the value during the dispatcher call // and copy the value back to the caller return buffer after that. 
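// (Sketch of the shape built below, using the locals created here: // retValArg = ADDR(LCL_VAR tmpRetBuf) -- handed to the dispatcher, // copyToRetBufNode = copyBlk(OBJ(LCL_VAR callerRetBufPtr), LCL_VAR tmpRetBuf), // i.e. the dispatcher writes into the stack temp, and afterwards we block-copy the temp back // into the caller-supplied return buffer.)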
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer")); constexpr bool unsafeValueClsCheck = false; lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck); lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet(); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType)); var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet(); GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType); GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr); GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType); constexpr bool isVolatile = false; constexpr bool isCopyBlock = true; copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock); if (origCall->gtType != TYP_VOID) { retVal = gtClone(retBufArg); } } else if (origCall->gtType != TYP_VOID) { JITDUMP("Creating a new temp for the return value\n"); newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher")); if (varTypeIsStruct(origCall->gtType)) { lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false); } else { // Since we pass a reference to the return value to the dispatcher // we need to use the real return type so we can normalize it on // load when we return it. lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType; } lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType))); retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)); if (varTypeIsStruct(origCall->gtType)) { retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv()); } } else { JITDUMP("No return value so using null pointer as arg\n"); retValArg = gtNewZeroConNode(TYP_I_IMPL); } callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs); // Add callTarget callDispatcherNode->gtCallArgs = gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd), callDispatcherNode->gtCallArgs); // Add the caller's return address slot. if (lvaRetAddrVar == BAD_VAR_NUM) { lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address")); lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL; lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); } GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL)); callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs); GenTree* finalTree = callDispatcherNode; if (copyToRetBufNode != nullptr) { finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode); } if (origCall->gtType == TYP_VOID) { return finalTree; } assert(retVal != nullptr); finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal); // The JIT seems to want to CSE this comma and messes up multi-reg ret // values in the process. Just avoid CSE'ing this tree entirely in that // case. 
if (origCall->HasMultiRegRetVal()) { finalTree->gtFlags |= GTF_DONT_CSE; } return finalTree; } //------------------------------------------------------------------------ // getLookupTree: get a lookup tree // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // handleFlags - flags to set on the result node // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the lookup tree // GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); } return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle); } //------------------------------------------------------------------------ // getRuntimeLookupTree: get a tree for a runtime lookup // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the runtime lookup tree // GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { assert(!compIsForInlining()); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be // used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array. if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull || pRuntimeLookup->testForFixup) { // If the first condition is true, runtime lookup tree is available only via the run-time helper function. // TODO-CQ If the second or third condition is true, we are always using the slow path since we can't // introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper. // The long-term solution is to introduce a new node representing a runtime lookup, create instances // of that node both in the importer and here, and expand the node in lower (introducing control flow if // necessary). 
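// (For illustration: when we can expand inline below, indirections == 2 with no indirect // offsets yields result = IND(IND(ctx + offsets[0]) + offsets[1]), i.e. // *(*(ctx + offsets[0]) + offsets[1]); the helper call built here is the conservative // fallback when such an expansion is not possible.)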
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind), compileTimeHandle); } GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack)); auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* { if (!((*tree)->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(*tree, true); if (clone) { return clone; } } unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); stmts.Push(gtNewTempAssign(temp, *tree)); *tree = gtNewLclvNode(temp, lvaGetActualType(temp)); return gtNewLclvNode(temp, lvaGetActualType(temp)); }; // Apply repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { GenTree* preInd = nullptr; if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset")); } if (i != 0) { result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result); } if (pRuntimeLookup->offsets[i] != 0) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } assert(!pRuntimeLookup->testForNull); if (pRuntimeLookup->indirections > 0) { assert(!pRuntimeLookup->testForFixup); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; } // Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result))) while (!stmts.Empty()) { result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result); } DISPTREE(result); return result; } //------------------------------------------------------------------------ // getVirtMethodPointerTree: get a tree for a virtual method pointer // // Arguments: // thisPtr - tree representing `this` pointer // pResolvedToken - pointer to the resolved token of the method // pCallInfo - pointer to call info // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true); GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false); GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc); return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // getTokenHandleTree: get a handle tree for a token // // Arguments: // pResolvedToken - token to get a handle for // parent - whether parent should be imported // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent) { CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo); GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } /***************************************************************************** * * Transform the given GT_CALL tree for tail call via JIT helper. */ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { JITDUMP("fgMorphTailCallViaJitHelper (before):\n"); DISPTREE(call); // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. // // For x86, the tailcall helper is defined as: // // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // // Note that the special arguments are on the stack, whereas the function arguments follow // the normal convention: there might be register arguments in ECX and EDX. The stack will // look like (highest address at the top): // first normal stack argument // ... // last normal stack argument // numberOfOldStackArgs // numberOfNewStackArgs // flags // callTarget // // Each special arg is 4 bytes. // // 'flags' is a bitmask where: // 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all // callee-saved registers for tailcall functions. Note that the helper assumes // that the callee-saved registers live immediately below EBP, and must have been // pushed in this order: EDI, ESI, EBX. // 2 == call target is a virtual stub dispatch. // // The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details // on the custom calling convention. // Check for PInvoke call types that we don't handle in codegen yet. assert(!call->IsUnmanaged()); assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr)); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // First move the 'this' pointer (if any) onto the regular arg list. We do this because // we are going to prepend special arguments onto the argument list (for non-x86 platforms), // and thus shift where the 'this' pointer will be passed to a later argument slot. In // addition, for all platforms, we are going to change the call into a helper call. Our code // generation code for handling calls to helpers does not handle 'this' pointers. So, when we // do this transformation, we must explicitly create a null 'this' pointer check, if required, // since special 'this' pointer handling will no longer kick in. // // Some call types, such as virtual vtable calls, require creating a call address expression // that involves the "this" pointer. Lowering will sometimes create an embedded statement // to create a temporary that is assigned to the "this" pointer expression, and then use // that temp to create the call address expression. This temp creation embedded statement // will occur immediately before the "this" pointer argument, and then will be used for both // the "this" pointer argument as well as the call address expression. In the normal ordering, // the embedded statement establishing the "this" pointer temp will execute before both uses // of the temp. 
However, for tail calls via a helper, we move the "this" pointer onto the // normal call argument list, and insert a placeholder which will hold the call address // expression. For non-x86, things are ok, because the order of execution of these is not // altered. However, for x86, the call address expression is inserted as the *last* argument // in the argument list, *after* the "this" pointer. It will be put on the stack, and be // evaluated first. To ensure we don't end up with out-of-order temp definition and use, // for those cases where call lowering creates an embedded form temp of "this", we will // create a temp here, early, that will later get morphed correctly. if (call->gtCallThisArg != nullptr) { GenTree* thisPtr = nullptr; GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR)) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); objp = thisPtr; } if (call->NeedsNullCheck()) { // clone "this" if "this" has no side effects. if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT)) { thisPtr = gtClone(objp, true); } var_types vt = objp->TypeGet(); if (thisPtr == nullptr) { // create a temp if either "this" has side effects or "this" is too complex to clone. // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); } else { // thisPtr = COMMA(deref("this"), "this") GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB); thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true)); } call->gtFlags &= ~GTF_CALL_NULLCHECK; } else { thisPtr = objp; } // TODO-Cleanup: we leave it as a virtual stub call to // use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here // and change `LowerCall` to recognize it as a direct call. // During rationalization tmp="this" and null check will // materialize as embedded stmts in right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. GenTreeCall::Use** ppArg = &call->gtCallArgs; for (GenTreeCall::Use& use : call->Args()) { ppArg = &use.NextRef(); } assert(ppArg != nullptr); assert(*ppArg == nullptr); unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES; GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate. // The constant will be replaced. GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the flags. // The constant will be replaced. 
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg1); ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the real call target that the Lowering phase will generate. // The constant will be replaced. GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg0); // It is now a varargs tail call. call->gtCallMoreFlags |= GTF_CALL_M_VARARGS; call->gtFlags &= ~GTF_CALL_POP_ARGS; // The function is responsible for doing an explicit null check when it is necessary. assert(!call->NeedsNullCheck()); JITDUMP("fgMorphTailCallViaJitHelper (after):\n"); DISPTREE(call); } //------------------------------------------------------------------------ // fgGetStubAddrArg: Return the virtual stub address for the given call. // // Notes: // the JIT must place the address of the stub used to load the call target, // the "stub indirection cell", in a special call argument with a special register. // // Arguments: // call - a call that needs virtual stub dispatching. // // Return Value: // addr tree with register requirements set. // GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call) { assert(call->IsVirtualStub()); GenTree* stubAddrArg; if (call->gtCallType == CT_INDIRECT) { stubAddrArg = gtClone(call->gtCallAddr, true); } else { assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT); ssize_t addr = ssize_t(call->gtStubCallStubAddr); stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR); #ifdef DEBUG stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif } assert(stubAddrArg != nullptr); stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg()); return stubAddrArg; } //------------------------------------------------------------------------------ // fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that // corresponds to the argument to a recursive call. // // Notes: // Due to non-standard args this is not just fgArgTabEntry::argNum. // For example, in R2R compilations we will have added a non-standard // arg for the R2R indirection cell. // // Arguments: // argTabEntry - the arg // unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry) { fgArgInfo* argInfo = call->fgArgInfo; unsigned argCount = argInfo->ArgCount(); fgArgTabEntry** argTable = argInfo->ArgTable(); unsigned numToRemove = 0; for (unsigned i = 0; i < argCount; i++) { fgArgTabEntry* arg = argTable[i]; // Late added args add extra args that do not map to IL parameters and that we should not reassign. if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate()) continue; if (arg->argNum < argTabEntry->argNum) numToRemove++; } return argTabEntry->argNum - numToRemove; } //------------------------------------------------------------------------------ // fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop. // // // Arguments: // block - basic block ending with a recursive fast tail call // recursiveTailCall - recursive tail call to transform // // Notes: // The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop. void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall) { assert(recursiveTailCall->IsTailCallConvertibleToLoop()); Statement* lastStmt = block->lastStmt(); assert(recursiveTailCall == lastStmt->GetRootNode()); // Transform recursive tail call into a loop.
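// Conceptually (sketch, hypothetical C# for illustration): // static int Fact(int n, int acc) { ... return Fact(n - 1, acc * n); } // becomes, in effect: // tmp0 = n - 1; tmp1 = acc * n; n = tmp0; acc = tmp1; goto <method start>; // The detailed IR for a similar shape is shown in the example below.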
Statement* earlyArgInsertionPoint = lastStmt; const DebugInfo& callDI = lastStmt->GetDebugInfo(); // Hoist arg setup statement for the 'this' argument. GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg; if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode()) { Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt); } // All arguments whose trees may involve caller parameter local variables need to be assigned to temps first; // then the temps need to be assigned to the method parameters. This is done so that the caller // parameters are not re-assigned before call arguments depending on them are evaluated. // tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of // where the next temp or parameter assignment should be inserted. // In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first // while the second call argument (const 1) doesn't. // Basic block before tail recursion elimination: // ***** BB04, stmt 1 (top level) // [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013) // [000033] --C - G------ - \--* call void RecursiveMethod // [000030] ------------ | / --* const int - 1 // [000031] ------------arg0 in rcx + --* +int // [000029] ------------ | \--* lclVar int V00 arg1 // [000032] ------------arg1 in rdx \--* const int 1 // // // Basic block after tail recursion elimination : // ***** BB04, stmt 1 (top level) // [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000030] ------------ | / --* const int - 1 // [000031] ------------ | / --* +int // [000029] ------------ | | \--* lclVar int V00 arg1 // [000050] - A---------- \--* = int // [000049] D------N---- \--* lclVar int V02 tmp0 // // ***** BB04, stmt 2 (top level) // [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000052] ------------ | / --* lclVar int V02 tmp0 // [000054] - A---------- \--* = int // [000053] D------N---- \--* lclVar int V00 arg0 // ***** BB04, stmt 3 (top level) // [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000032] ------------ | / --* const int 1 // [000057] - A---------- \--* = int // [000056] D------N---- \--* lclVar int V01 arg1 Statement* tmpAssignmentInsertionPoint = lastStmt; Statement* paramAssignmentInsertionPoint = lastStmt; // Process early args. They may contain both setup statements for late args and actual args. // Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum // below has the correct second argument. int earlyArgIndex = (thisArg == nullptr) ? 0 : 1; for (GenTreeCall::Use& use : recursiveTailCall->Args()) { GenTree* earlyArg = use.GetNode(); if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode()) { if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0) { // This is a setup node so we need to hoist it. Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt); } else { // This is an actual argument that needs to be assigned to the corresponding caller parameter. 
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } } } earlyArgIndex++; } // Process late args. int lateArgIndex = 0; for (GenTreeCall::Use& use : recursiveTailCall->LateArgs()) { // A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter. GenTree* lateArg = use.GetNode(); fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } lateArgIndex++; } // If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that // compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { var_types thisType = lvaTable[info.compThisArg].TypeGet(); GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType); GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType)); Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog // but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization // for all non-parameter IL locals as well as temp structs with GC fields. // Liveness phase will remove unnecessary initializations. 
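// For example, under .locals init an IL local that the loop body may read before writing must be
// re-zeroed on every iteration, because the prolog that normally zeroes it now runs only once, before
// the loop is entered.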
if (info.compInitMem || compSuppressedZeroInit) { unsigned varNum; LclVarDsc* varDsc; for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++) { #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS if (!varDsc->lvIsParam) { var_types lclType = varDsc->TypeGet(); bool isUserLocal = (varNum < info.compLocalsCount); bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr()); bool hadSuppressedInit = varDsc->lvSuppressedZeroInit; if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit) { GenTree* lcl = gtNewLclvNode(varNum, lclType); GenTree* init = nullptr; if (varTypeIsStruct(lclType)) { const bool isVolatile = false; const bool isCopyBlock = false; init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock); init = fgMorphInitBlock(init); } else { GenTree* zero = gtNewZeroConNode(genActualType(lclType)); init = gtNewAssignNode(lcl, zero); } Statement* initStmt = gtNewStmt(init, callDI); fgInsertStmtBefore(block, lastStmt, initStmt); } } } } // Remove the call fgRemoveStmt(block, lastStmt); // Set the loop edge. if (opts.IsOSR()) { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. block->bbJumpDest = fgEntryBB; } else { // Ensure we have a scratch block and then target the next // block. Loop detection needs to see a pred out of the loop, // so mark the scratch block BBF_DONT_REMOVE to prevent empty // block removal on it. fgEnsureFirstBBisScratch(); fgFirstBB->bbFlags |= BBF_DONT_REMOVE; block->bbJumpDest = fgFirstBB->bbNext; } // Finish hooking things up. block->bbJumpKind = BBJ_ALWAYS; fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } //------------------------------------------------------------------------------ // fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter. // // // Arguments: // arg - argument to assign // argTabEntry - argument table entry corresponding to arg // lclParamNum - the lcl num of the parameter // block --- basic block the call is in // callILOffset - IL offset of the call // tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) // paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted // // Return Value: // parameter assignment statement if one was inserted; nullptr otherwise. Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint) { // Call arguments should be assigned to temps first and then the temps should be assigned to parameters because // some argument trees may reference parameters directly. GenTree* argInTemp = nullptr; bool needToAssignParameter = true; // TODO-CQ: enable calls with struct arguments passed in registers. noway_assert(!varTypeIsStruct(arg->TypeGet())); if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl()) { // The argument is already assigned to a temp or is a const. argInTemp = arg; } else if (arg->OperGet() == GT_LCL_VAR) { unsigned lclNum = arg->AsLclVar()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (!varDsc->lvIsParam) { // The argument is a non-parameter local so it doesn't need to be assigned to a temp. 
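// (A non-parameter local cannot be clobbered by the parameter assignments being inserted, so reading
// it directly is safe.)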
argInTemp = arg; } else if (lclNum == lclParamNum) { // The argument is the same parameter local that we were about to assign so // we can skip the assignment. needToAssignParameter = false; } } // TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve // any caller parameters. Some common cases are handled above but we may be able to eliminate // more temp assignments. Statement* paramAssignStmt = nullptr; if (needToAssignParameter) { if (argInTemp == nullptr) { // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. // TODO: we can avoid a temp assignment if we can prove that the argument tree // doesn't involve any caller parameters. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType); GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc); Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI); fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt); argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType); } // Now assign the temp to the parameter. const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum); assert(paramDsc->lvIsParam); GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType); GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp); paramAssignStmt = gtNewStmt(paramAssignNode, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt); } return paramAssignStmt; } /***************************************************************************** * * Transform the given GT_CALL tree for code generation. */ GenTree* Compiler::fgMorphCall(GenTreeCall* call) { if (call->CanTailCall()) { GenTree* newNode = fgMorphPotentialTailCall(call); if (newNode != nullptr) { return newNode; } assert(!call->CanTailCall()); #if FEATURE_MULTIREG_RET if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet())) { // The tail call has been rejected so we must finish the work deferred // by impFixupCallStructReturn for multi-reg-returning calls and transform // ret call // into // temp = call // ret temp // Force re-evaluating the argInfo as the return argument has changed. call->ResetArgInfo(); // Create a new temp. unsigned tmpNum = lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call).")); lvaTable[tmpNum].lvIsMultiRegRet = true; CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd; assert(structHandle != NO_CLASS_HANDLE); const bool unsafeValueClsCheck = false; lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); GenTree* assg = gtNewAssignNode(dst, call); assg = fgMorphTree(assg); // Create the assignment statement and insert it before the current statement. Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo()); fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt); // Return the temp. 
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); result->gtFlags |= GTF_DONT_CSE; compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call #ifdef DEBUG if (verbose) { printf("\nInserting assignment of a multi-reg call result to a temp:\n"); gtDispStmt(assgStmt); } result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG return result; } #endif } if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR) #ifdef FEATURE_READYTORUN || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR) #endif ) && (call == fgMorphStmt->GetRootNode())) { // This is a call to CORINFO_HELP_VIRTUAL_FUNC_PTR with an ignored result. // Transform it into a null check. GenTree* thisPtr = call->gtCallArgs->GetNode(); GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB); return fgMorphTree(nullCheck); } noway_assert(call->gtOper == GT_CALL); // // Only count calls once (only in the global morph phase) // if (fgGlobalMorph) { if (call->gtCallType == CT_INDIRECT) { optCallCount++; optIndirectCallCount++; } else if (call->gtCallType == CT_USER_FUNC) { optCallCount++; if (call->IsVirtual()) { optIndirectCallCount++; } } } // Couldn't inline - remember that this BB contains method calls // Mark the block as a GC safe point for the call if possible. // In the event the call indicates the block isn't a GC safe point // and the call is unmanaged with a GC transition suppression request // then insert a GC poll. CLANG_FORMAT_COMMENT_ANCHOR; if (IsGcSafePoint(call)) { compCurBB->bbFlags |= BBF_GC_SAFE_POINT; } // Regardless of the state of the basic block with respect to GC safe point, // we will always insert a GC Poll for scenarios involving a suppressed GC // transition. Only mark the block for GC Poll insertion on the first morph. if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition()) { compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT); optMethodFlags |= OMF_NEEDS_GCPOLLS; } // Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag // // We need to do these before the arguments are morphed if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)) { // See if this is foldable GenTree* optTree = gtFoldExprCall(call); // If we optimized, morph the result if (optTree != call) { return fgMorphTree(optTree); } } compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call /* Process the "normal" argument list */ call = fgMorphArgs(call); noway_assert(call->gtOper == GT_CALL); // Should we expand this virtual method call target early here? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // We only expand the Vtable Call target once in the global morph phase if (fgGlobalMorph) { assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once call->gtControlExpr = fgExpandVirtualVtableCallTarget(call); } // We always have to morph or re-morph the control expr // call->gtControlExpr = fgMorphTree(call->gtControlExpr); // Propagate any gtFlags into the call call->gtFlags |= call->gtControlExpr->gtFlags; } // Morph stelem.ref helper call to store a null value, into a store into an array without the helper. // This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
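// For example, a C# store such as "arr[i] = null" reaches here as a CORINFO_HELP_ARRADDR_ST call;
// storing a null reference can never fail the array covariance check, so the helper can be replaced
// with a plain array store.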
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST))) { GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode(); if (value->IsIntegralConst(0)) { assert(value->OperGet() == GT_CNS_INT); GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode(); GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode(); // Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy // the spill trees as well if necessary. GenTreeOp* argSetup = nullptr; for (GenTreeCall::Use& use : call->Args()) { GenTree* const arg = use.GetNode(); if (arg->OperGet() != GT_ASG) { continue; } assert(arg != arr); assert(arg != index); arg->gtFlags &= ~GTF_LATE_ARG; GenTree* op1 = argSetup; if (op1 == nullptr) { op1 = gtNewNothingNode(); #if DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg); #if DEBUG argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } #ifdef DEBUG auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult { (*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; return WALK_CONTINUE; }; fgWalkTreePost(&arr, resetMorphedFlag); fgWalkTreePost(&index, resetMorphedFlag); fgWalkTreePost(&value, resetMorphedFlag); #endif // DEBUG GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index); GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value); GenTree* result = fgMorphTree(arrStore); if (argSetup != nullptr) { result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result); #if DEBUG result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return result; } } if (call->IsNoReturn()) { // // If we know that the call does not return then we can set fgRemoveRestOfBlock // to remove all subsequent statements and change the call's basic block to BBJ_THROW. // As a result the compiler won't need to preserve live registers across the call. // // This isn't needed for tail calls as there shouldn't be any code after the call anyway. // Besides, the tail call code is part of the epilog and converting the block to // BBJ_THROW would result in the tail call being dropped as the epilog is generated // only for BBJ_RETURN blocks.
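// (A typical example is a call to a throw helper: since such a call never returns, everything after
// it in the block is unreachable.)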
// if (!call->IsTailCall()) { fgRemoveRestOfBlock = true; } } return call; } /***************************************************************************** * * Expand and return the call target address for a VirtualCall * The code here should match that generated by LowerVirtualVtableCall */ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) { GenTree* result; JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); noway_assert(call->gtCallType == CT_USER_FUNC); // get a reference to the thisPtr being passed fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0); GenTree* thisPtr = thisArgTabEntry->GetNode(); // fgMorphArgs must enforce this invariant by creating a temp // assert(thisPtr->OperIsLocal()); // Make a copy of the thisPtr by cloning // thisPtr = gtClone(thisPtr, true); noway_assert(thisPtr != nullptr); // Get hold of the vtable offset unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // Dereference the this pointer to obtain the method table, it is called vtab below GenTree* vtab; assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr); vtab->gtFlags |= GTF_IND_INVARIANT; // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { // Note this isRelative code path is currently never executed // as the VM doesn't ever return: isRelative == true // if (isRelative) { // MethodTable offset is a relative pointer. // // Additional temporary variable is used to store virtual table pointer. // Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // When isRelative is true we need to setup two temporary variables // var1 = vtab // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] // result = [var2] + var2 // unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false); tmpTree1->gtFlags |= GTF_IND_NONFAULTING; tmpTree1->gtFlags |= GTF_IND_INVARIANT; // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL)); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression> // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2] 
result->gtFlags |= GTF_IND_NONFAULTING; result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 // Now stitch together the two assignments and the calculation of result into a single tree GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result); result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree); } else { // result = [vtab + vtabOffsOfIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } } else { result = vtab; assert(!isRelative); } if (!isRelative) { // Load the function address // result = [result + vtabOffsAfterIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL)); // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; } return result; } /***************************************************************************** * * Transform the given constant tree for code generation. */ GenTree* Compiler::fgMorphConst(GenTree* tree) { assert(tree->OperIsConst()); /* Clear any exception flags or other unnecessary flags * that may have been set before folding this node to a constant */ tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); if (!tree->OperIs(GT_CNS_STR)) { return tree; } if (tree->AsStrCon()->IsStringEmptyField()) { LPVOID pValue; InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue); return fgMorphTree(gtNewStringLiteralNode(iat, pValue)); } // TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will // guarantee slow performance for that block. Instead cache the return value // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; if (compCurBB->bbJumpKind == BBJ_THROW) { useLazyStrCns = true; } else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall()) { // Quick check: see if the root node of the current statement happens to be a noreturn call. GenTreeCall* call = compCurStmt->GetRootNode()->AsCall(); useLazyStrCns = call->IsNoReturn() || fgIsThrow(call); } if (useLazyStrCns) { CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd); if (helper != CORINFO_HELP_UNDEF) { // For unimportant blocks, we want to construct the string lazily GenTreeCall::Use* args; if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE) { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT)); } else { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT), gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd)); } tree = gtNewHelperCallNode(helper, TYP_REF, args); return fgMorphTree(tree); } } assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd)); LPVOID pValue; InfoAccessType iat = info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue); tree = gtNewStringLiteralNode(iat, pValue); return fgMorphTree(tree); } //------------------------------------------------------------------------ // fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar. // // Arguments: // obj - the obj node.
// destroyNodes -- destroy nodes that are optimized away // // Return value: // GenTreeLclVar if the obj can be replaced by it, null otherwise. // // Notes: // TODO-CQ: currently this transformation is done only under copy block, // but it is beneficial to do it for each OBJ node. However, `PUT_ARG_STACK` // for some platforms does not expect struct `LCL_VAR` as a source, so // it needs more work. // GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes) { if (opts.OptimizationEnabled()) { GenTree* op1 = obj->Addr(); assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity"); if (op1->OperIs(GT_ADDR)) { GenTreeUnOp* addr = op1->AsUnOp(); GenTree* addrOp = addr->gtGetOp1(); if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR)) { GenTreeLclVar* lclVar = addrOp->AsLclVar(); ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout(); ClassLayout* objLayout = obj->GetLayout(); if (ClassLayout::AreCompatible(lclVarLayout, objLayout)) { #ifdef DEBUG CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle(); assert(objClsHandle != NO_CLASS_HANDLE); if (verbose) { CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar); printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar)); printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different")); } #endif // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) lclVar->gtFlags &= ~GTF_DONT_CSE; lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE); if (destroyNodes) { DEBUG_DESTROY_NODE(obj); DEBUG_DESTROY_NODE(addr); } return lclVar; } } } } return nullptr; } /***************************************************************************** * * Transform the given GTK_LEAF tree for code generation. */ GenTree* Compiler::fgMorphLeaf(GenTree* tree) { assert(tree->OperKind() & GTK_LEAF); if (tree->gtOper == GT_LCL_VAR) { const bool forceRemorph = false; return fgMorphLocalVar(tree, forceRemorph); } else if (tree->gtOper == GT_LCL_FLD) { if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(), tree->AsLclFld()->GetLclOffs()); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 } else if (tree->gtOper == GT_FTN_ADDR) { GenTreeFptrVal* fptrValTree = tree->AsFptrVal(); // A function pointer address is being used. Let the VM know if this is the // target of a Delegate or a raw function pointer. bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget; CORINFO_CONST_LOOKUP addrInfo; #ifdef FEATURE_READYTORUN if (fptrValTree->gtEntryPoint.addr != nullptr) { addrInfo = fptrValTree->gtEntryPoint; } else #endif { info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo); } GenTree* indNode = nullptr; switch (addrInfo.accessType) { case IAT_PPVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true); // Add the second indirection indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode); // This indirection won't cause an exception. indNode->gtFlags |= GTF_IND_NONFAULTING; // This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT; break; case IAT_PVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true); break; case IAT_VALUE: // Refer to gtNewIconHandleNode() as the template for constructing a constant handle // tree->SetOper(GT_CNS_INT); tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle)); tree->gtFlags |= GTF_ICON_FTN_ADDR; break; default: noway_assert(!"Unknown addrInfo.accessType"); } if (indNode != nullptr) { DEBUG_DESTROY_NODE(tree); tree = fgMorphTree(indNode); } } return tree; } void Compiler::fgAssignSetVarDef(GenTree* tree) { GenTreeLclVarCommon* lclVarCmnTree; bool isEntire = false; if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire)) { if (isEntire) { lclVarCmnTree->gtFlags |= GTF_VAR_DEF; } else { // We consider partial definitions to be modeled as uses followed by definitions. // This captures the idea that preceding defs are not necessarily made redundant // by this definition. lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG); } } } //------------------------------------------------------------------------ // fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment // // Arguments: // tree - The block assignment to be possibly morphed // // Return Value: // The modified tree if successful, nullptr otherwise. // // Assumptions: // 'tree' must be a block assignment. // // Notes: // If successful, this method always returns the incoming tree, modifying only // its arguments. // GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) { // This must be a block assignment. noway_assert(tree->OperIsBlkOp()); var_types asgType = tree->TypeGet(); GenTree* asg = tree; GenTree* dest = asg->gtGetOp1(); GenTree* src = asg->gtGetOp2(); unsigned destVarNum = BAD_VAR_NUM; LclVarDsc* destVarDsc = nullptr; GenTree* destLclVarTree = nullptr; bool isCopyBlock = asg->OperIsCopyBlkOp(); bool isInitBlock = !isCopyBlock; unsigned size = 0; CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; if (dest->gtEffectiveVal()->OperIsBlk()) { GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk(); size = lhsBlk->Size(); if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree)) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); } if (lhsBlk->OperGet() == GT_OBJ) { clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle(); } } else { // Is this an enregisterable struct that is already a simple assignment? // This can happen if we are re-morphing. // Note that we won't do this straightaway if this is a SIMD type, since it // may be a promoted lclVar (sometimes we promote the individual float fields of // fixed-size SIMD).
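// The shape of the transformation attempted below, sketched for a register-sized local:
// ASG(BLK<4>(ADDR(LCL_VAR int V02)), src) ==> ASG(LCL_VAR int V02, src)
// i.e. a block op that exactly covers a register-sized local is retyped into a scalar assignment.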
if (dest->OperGet() == GT_IND) { noway_assert(asgType != TYP_STRUCT); if (varTypeIsStruct(asgType)) { destLclVarTree = fgIsIndirOfAddrOfLocal(dest); } if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR)) { fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/); dest->gtFlags |= GTF_DONT_CSE; return tree; } } else { noway_assert(dest->OperIsLocal()); destLclVarTree = dest; } if (destLclVarTree != nullptr) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); if (asgType == TYP_STRUCT) { clsHnd = destVarDsc->GetStructHnd(); size = destVarDsc->lvExactSize; } } if (asgType != TYP_STRUCT) { size = genTypeSize(asgType); } } if (size == 0) { return nullptr; } if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } if (src->IsCall() || src->OperIsSIMD()) { // Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413. return nullptr; } if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet())) { // // See if we can do a simple transformation: // // GT_ASG <TYP_size> // / \. // GT_IND GT_IND or CNS_INT // | | // [dest] [src] // if (asgType == TYP_STRUCT) { // It is possible to use `initobj` to init a primitive type on the stack, // like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`; // in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)` // and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real // struct assignment. if (size == REGSIZE_BYTES) { if (clsHnd == NO_CLASS_HANDLE) { // A register-sized cpblk can be treated as an integer assignment. asgType = TYP_I_IMPL; } else { BYTE gcPtr; info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); asgType = getJitGCType(gcPtr); } } else { switch (size) { case 1: asgType = TYP_BYTE; break; case 2: asgType = TYP_SHORT; break; #ifdef TARGET_64BIT case 4: asgType = TYP_INT; break; #endif // TARGET_64BIT } } } } GenTree* srcLclVarTree = nullptr; LclVarDsc* srcVarDsc = nullptr; if (isCopyBlock) { if (src->OperGet() == GT_LCL_VAR) { srcLclVarTree = src; srcVarDsc = lvaGetDesc(src->AsLclVarCommon()); } else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree)) { srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon()); } if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } } if (asgType != TYP_STRUCT) { noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType)); // For initBlk, a non-constant source is not going to allow us to fiddle // with the bits to create a single assignment. // Nor do we (for now) support transforming an InitBlock of SIMD type, unless // it is a direct assignment to a lclVar and the value is zero. if (isInitBlock) { if (!src->IsConstInitVal()) { return nullptr; } if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr))) { return nullptr; } } if (destVarDsc != nullptr) { // Kill everything about dest if (optLocalAssertionProp) { if (optAssertionCount > 0) { fgKillDependentAssertions(destVarNum DEBUGARG(tree)); } } // A previous incarnation of this code also required the local not to be // address-exposed(=taken). That seems orthogonal to the decision of whether // to do field-wise assignments: being address-exposed will cause it to be // "dependently" promoted, so it will be in the right memory location.
// One possible // further reason for avoiding field-wise stores is that the struct might have alignment-induced // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid // concern, then we could compromise, and say that being address-exposed plus having fields that do not // completely cover the memory of the struct prevents field-wise assignments. Same situation exists for the "src" decision. if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.) return nullptr; } else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc))) { // Use the dest local var directly, as well as its type. dest = destLclVarTree; asgType = destVarDsc->lvType; // If the block operation had been a write to a local var of a small int type, // of the exact size of the small int type, and the var is NormalizeOnStore, // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't // have done that normalization. If we're now making it into an assignment, // the NormalizeOnStore will work, and it can be a full def. if (destVarDsc->lvNormalizeOnStore()) { dest->gtFlags &= (~GTF_VAR_USEASG); } } else { // Could be a non-promoted struct, or a floating point type local, or // an int subject to a partial write. Don't enregister. lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); // Mark the local var tree as a definition point of the local. destLclVarTree->gtFlags |= GTF_VAR_DEF; if (size < destVarDsc->lvExactSize) { // If it's not a full-width assignment.... destLclVarTree->gtFlags |= GTF_VAR_USEASG; } if (dest == destLclVarTree) { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); dest = gtNewIndir(asgType, addr); } } } // Check to ensure we don't have a reducible *(& ... ) if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR) { // If dest is an Indir or Block, and it has a child that is an Addr node // GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR // Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'? // GenTree* destOp = addrNode->gtGetOp1(); var_types destOpType = destOp->TypeGet(); // We can if we have a primitive integer type and the sizes are exactly the same. // if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType)))) { dest = destOp; asgType = destOpType; } } if (dest->gtEffectiveVal()->OperIsIndir()) { // If we have no information about the destination, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. if (!fgIsIndirOfAddrOfLocal(dest)) { dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); tree->gtFlags |= GTF_GLOB_REF; } dest->SetIndirExceptionFlags(this); tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT); } if (isCopyBlock) { if (srcVarDsc != nullptr) { // Handled above. assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted); if (!varTypeIsFloating(srcLclVarTree->TypeGet()) && size == genTypeSize(genActualType(srcLclVarTree->TypeGet()))) { // Use the src local var directly. src = srcLclVarTree; } else { // The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar)) // or indir(lclVarAddr) so it must be on the stack.
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum(); lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); GenTree* srcAddr; if (src == srcLclVarTree) { srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src); src = gtNewOperNode(GT_IND, asgType, srcAddr); } else { assert(src->OperIsIndir()); } } } if (src->OperIsIndir()) { if (!fgIsIndirOfAddrOfLocal(src)) { // If we have no information about the src, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); } src->SetIndirExceptionFlags(this); } } else // InitBlk { #ifdef FEATURE_SIMD if (varTypeIsSIMD(asgType)) { assert(!isCopyBlock); // Else we would have returned the tree above. noway_assert(src->IsIntegralConst(0)); noway_assert(destVarDsc != nullptr); src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size); } else #endif { if (src->OperIsInitVal()) { src = src->gtGetOp1(); } assert(src->IsCnsIntOrI()); // This will mutate the integer constant, in place, to be the correct // value for the type we are using in the assignment. src->AsIntCon()->FixupInitBlkValue(asgType); } } // Ensure that the dest is setup appropriately. if (dest->gtEffectiveVal()->OperIsIndir()) { dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/); } // Ensure that the rhs is setup appropriately. if (isCopyBlock) { src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/); } // Set the lhs and rhs on the assignment. if (dest != tree->AsOp()->gtOp1) { asg->AsOp()->gtOp1 = dest; } if (src != asg->AsOp()->gtOp2) { asg->AsOp()->gtOp2 = src; } asg->ChangeType(asgType); dest->gtFlags |= GTF_DONT_CSE; asg->gtFlags &= ~GTF_EXCEPT; asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT); // Un-set GTF_REVERSE_OPS, and it will be set later if appropriate. asg->gtFlags &= ~GTF_REVERSE_OPS; #ifdef DEBUG if (verbose) { printf("fgMorphOneAsgBlock (after):\n"); gtDispTree(tree); } #endif return tree; } return nullptr; } //------------------------------------------------------------------------ // fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree // to a tree of promoted field initialization assignments. // // Arguments: // destLclNode - The destination LclVar node // initVal - The initialization value // blockSize - The amount of bytes to initialize // // Return Value: // A tree that performs field by field initialization of the destination // struct variable if various conditions are met, nullptr otherwise. 
// // Notes: // This transforms a single block initialization assignment like: // // * ASG struct (init) // +--* BLK(12) struct // | \--* ADDR long // | \--* LCL_VAR struct(P) V02 loc0 // | \--* int V02.a (offs=0x00) -> V06 tmp3 // | \--* ubyte V02.c (offs=0x04) -> V07 tmp4 // | \--* float V02.d (offs=0x08) -> V08 tmp5 // \--* INIT_VAL int // \--* CNS_INT int 42 // // into a COMMA tree of assignments that initialize each promoted struct // field: // // * COMMA void // +--* COMMA void // | +--* ASG int // | | +--* LCL_VAR int V06 tmp3 // | | \--* CNS_INT int 0x2A2A2A2A // | \--* ASG ubyte // | +--* LCL_VAR ubyte V07 tmp4 // | \--* CNS_INT int 42 // \--* ASG float // +--* LCL_VAR float V08 tmp5 // \--* CNS_DBL float 1.5113661732714390e-13 // GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize) { assert(destLclNode->OperIs(GT_LCL_VAR)); LclVarDsc* destLclVar = lvaGetDesc(destLclNode); assert(varTypeIsStruct(destLclVar->TypeGet())); assert(destLclVar->lvPromoted); if (blockSize == 0) { JITDUMP(" size is zero or unknown.\n"); return nullptr; } if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles) { JITDUMP(" dest is address exposed and contains holes.\n"); return nullptr; } if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles) { // TODO-1stClassStructs: there are no reasons for this pessimization, delete it. JITDUMP(" dest has custom layout and contains holes.\n"); return nullptr; } if (destLclVar->lvExactSize != blockSize) { JITDUMP(" dest size mismatch.\n"); return nullptr; } if (!initVal->OperIs(GT_CNS_INT)) { JITDUMP(" source is not constant.\n"); return nullptr; } const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL; if (initPattern != 0) { for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i); if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet())) { // Cannot initialize GC or SIMD types with a non-zero constant. // The former is completely bogus. The latter restriction could be // lifted by supporting non-zero SIMD constants or by generating // field initialization code that converts an integer constant to // the appropriate SIMD value. Unlikely to be very useful, though. JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n"); return nullptr; } } } JITDUMP(" using field by field initialization.\n"); GenTree* tree = nullptr; for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { unsigned fieldLclNum = destLclVar->lvFieldLclStart + i; LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum); GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet()); // If it had been labeled a "USEASG", assignments to the individual promoted fields are not. dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG)); GenTree* src; switch (dest->TypeGet()) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: // Promoted fields are expected to be "normalize on load". If that changes then // we may need to adjust this code to widen the constant correctly.
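// For example, with initPattern 0x2A2A2A2A2A2A2A2A, a TYP_UBYTE field receives 0x2A and a TYP_INT
// field receives 0x2A2A2A2A via the masking below.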
assert(fieldDesc->lvNormalizeOnLoad()); FALLTHROUGH; case TYP_INT: { int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1; src = gtNewIconNode(static_cast<int32_t>(initPattern & mask)); break; } case TYP_LONG: src = gtNewLconNode(initPattern); break; case TYP_FLOAT: float floatPattern; memcpy(&floatPattern, &initPattern, sizeof(floatPattern)); src = gtNewDconNode(floatPattern, dest->TypeGet()); break; case TYP_DOUBLE: double doublePattern; memcpy(&doublePattern, &initPattern, sizeof(doublePattern)); src = gtNewDconNode(doublePattern, dest->TypeGet()); break; case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD assert(initPattern == 0); src = gtNewIconNode(0, dest->TypeGet()); break; default: unreached(); } GenTree* asg = gtNewAssignNode(dest, src); if (optLocalAssertionProp) { optAssertionGen(asg); } if (tree != nullptr) { tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg); } else { tree = asg; } } return tree; } //------------------------------------------------------------------------ // fgMorphGetStructAddr: Gets the address of a struct object // // Arguments: // pTree - the parent's pointer to the struct object node // clsHnd - the class handle for the struct type // isRValue - true if this is a source (not dest) // // Return Value: // Returns the address of the struct value, possibly modifying the existing tree to // sink the address below any comma nodes (this is to canonicalize for value numbering). // If this is a source, it will morph it to a GT_IND before taking its address, // since it may not be remorphed (and we don't want blk nodes as rvalues). GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue) { GenTree* addr; GenTree* tree = *pTree; // If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we // need to hang onto that for the purposes of value numbering. if (tree->OperIsIndir()) { if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0) { addr = tree->AsOp()->gtOp1; } else { if (isRValue && tree->OperIsBlk()) { tree->ChangeOper(GT_IND); } addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } else if (tree->gtOper == GT_COMMA) { // If this is a comma, we're going to "sink" the GT_ADDR below it.
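// e.g. ADDR(COMMA(sideEffects, x)) becomes COMMA(sideEffects, ADDR(x)), with the comma retyped to
// TYP_BYREF, so that value numbering sees the address in a canonical position.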
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue); tree->gtType = TYP_BYREF; addr = tree; } else { switch (tree->gtOper) { case GT_LCL_FLD: case GT_LCL_VAR: case GT_INDEX: case GT_FIELD: case GT_ARR_ELEM: addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); break; case GT_INDEX_ADDR: addr = tree; break; default: { // TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're // not going to use "temp" GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd); unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum(); lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr)); addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue); break; } } } *pTree = addr; return addr; } //------------------------------------------------------------------------ // fgMorphBlockOperand: Canonicalize an operand of a block assignment // // Arguments: // tree - The block operand // asgType - The type of the assignment // blockWidth - The size of the block // isBlkReqd - true iff this operand must remain a block node // // Return Value: // Returns the morphed block operand // // Notes: // This does the following: // - Ensures that a struct operand is a block node or lclVar. // - Ensures that any COMMAs are above ADDR nodes. // Although 'tree' WAS an operand of a block assignment, the assignment // may have been retyped to be a scalar assignment. GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd) { GenTree* effectiveVal = tree->gtEffectiveVal(); if (asgType != TYP_STRUCT) { if (effectiveVal->OperIsIndir()) { if (!isBlkReqd) { GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType)) { effectiveVal = addr->gtGetOp1(); } else if (effectiveVal->OperIsBlk()) { effectiveVal->SetOper(GT_IND); } } effectiveVal->gtType = asgType; } else if (effectiveVal->TypeGet() != asgType) { if (effectiveVal->IsCall()) { #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } else { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); effectiveVal = gtNewIndir(asgType, addr); } } } else { GenTreeIndir* indirTree = nullptr; GenTreeLclVarCommon* lclNode = nullptr; bool needsIndirection = true; if (effectiveVal->OperIsIndir()) { indirTree = effectiveVal->AsIndir(); GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR)) { lclNode = addr->gtGetOp1()->AsLclVarCommon(); } } else if (effectiveVal->OperGet() == GT_LCL_VAR) { lclNode = effectiveVal->AsLclVarCommon(); } else if (effectiveVal->IsCall()) { needsIndirection = false; #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } #ifdef TARGET_ARM64 else if (effectiveVal->OperIsHWIntrinsic()) { needsIndirection = false; #ifdef DEBUG GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic(); assert(intrinsic->TypeGet() == TYP_STRUCT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())); #endif } #endif // TARGET_ARM64 if (lclNode != nullptr) { const LclVarDsc* varDsc = lvaGetDesc(lclNode); if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType)) { if (effectiveVal != lclNode) { JITDUMP("Replacing block node [%06d] with lclVar 
V%02u\n", dspTreeID(tree), lclNode->GetLclNum()); effectiveVal = lclNode; } needsIndirection = false; } else { // This may be a lclVar that was determined to be address-exposed. effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT); } } if (needsIndirection) { if (indirTree != nullptr) { // If we have an indirection and a block is required, it should already be a block. assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else { GenTree* newTree; GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); if (isBlkReqd) { CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal); if (clsHnd == NO_CLASS_HANDLE) { newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth)); } else { newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } } else { newTree = gtNewIndir(asgType, addr); } effectiveVal = newTree; } } } assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal))); tree = effectiveVal; return tree; } //------------------------------------------------------------------------ // fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields. // // Arguments: // lclNum1 - a promoted lclVar that is used in fieldwise assignment; // lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM. // // Return Value: // True if the second local is valid and has the same struct handle as the first, // false otherwise. // // Notes: // This check is needed to avoid accessing LCL_VARs with incorrect // CORINFO_FIELD_HANDLE that would confuse VN optimizations. // bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { assert(lclNum1 != BAD_VAR_NUM); if (lclNum2 == BAD_VAR_NUM) { return false; } const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1); const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2); assert(varTypeIsStruct(varDsc1)); if (!varTypeIsStruct(varDsc2)) { return false; } CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd(); CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd(); assert(struct1 != NO_CLASS_HANDLE); assert(struct2 != NO_CLASS_HANDLE); if (struct1 != struct2) { return false; } return true; } // insert conversions and normalize to make tree amenable to register // FP architectures GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree) { if (tree->OperIsArithmetic()) { if (varTypeIsFloating(tree)) { GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet())); if (op1->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet()); } if (op2->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet()); } } } else if (tree->OperIsCompare()) { GenTree* op1 = tree->AsOp()->gtOp1; if (varTypeIsFloating(op1)) { GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op2)); if (op1->TypeGet() != op2->TypeGet()) { // both had better be floating, just one bigger than other if (op1->TypeGet() == TYP_FLOAT) { assert(op2->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_FLOAT) { assert(op1->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } } } } return tree; } #ifdef FEATURE_SIMD 
//-------------------------------------------------------------------------------------------------------------- // getSIMDStructFromField: // Check whether the field belongs to a simd struct. If it does, return the GenTree* for // the struct node, along with the base type, field index and simd size. If it does not, just return nullptr. // Usually, if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, we // should return nullptr, since in this case the SIMD struct should be treated as a regular struct. // However, if the caller wants the simd struct node regardless, ignoreUsedInSIMDIntrinsic can be set // to true. The IsUsedInSIMDIntrinsic check is then skipped, and the SIMD struct node is returned // whenever the struct is a SIMD struct. // // Arguments: // tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd // struct used for simd intrinsic or not. // simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut // to simd lclvar's base JIT type. // indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut // equals to the index number of this field. // simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut // equals to the simd struct size which this tree belongs to. // ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore // the UsedInSIMDIntrinsic check. // // return value: // A GenTree* which points to the simd lclvar tree that the field belongs to. If the tree is not a // simd-intrinsic-related field, return nullptr. // GenTree* Compiler::getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic /*false*/) { GenTree* ret = nullptr; if (tree->OperGet() == GT_FIELD) { GenTree* objRef = tree->AsField()->GetFldObj(); if (objRef != nullptr) { GenTree* obj = nullptr; if (objRef->gtOper == GT_ADDR) { obj = objRef->AsOp()->gtOp1; } else if (ignoreUsedInSIMDIntrinsic) { obj = objRef; } else { return nullptr; } if (isSIMDTypeLocal(obj)) { LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon()); if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic) { *simdSizeOut = varDsc->lvExactSize; *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj); ret = obj; } } else if (obj->OperGet() == GT_SIMD) { ret = obj; GenTreeSIMD* simdNode = obj->AsSIMD(); *simdSizeOut = simdNode->GetSimdSize(); *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType(); } #ifdef FEATURE_HW_INTRINSICS else if (obj->OperIsHWIntrinsic()) { ret = obj; GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic(); *simdSizeOut = simdNode->GetSimdSize(); *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType(); } #endif // FEATURE_HW_INTRINSICS } } if (ret != nullptr) { var_types fieldType = tree->TypeGet(); if (fieldType == TYP_LONG) { // Vector2/3/4 expose public float fields while Vector<T> // and Vector64/128/256<T> have internal ulong fields. So // we should only ever encounter accesses for TYP_FLOAT or // TYP_LONG and in the case of the latter we don't want the // generic type since we are executing some algorithm on the // raw underlying bits instead.
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG; } else { assert(fieldType == TYP_FLOAT); } unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut)); *indexOut = tree->AsField()->gtFldOffset / baseTypeSize; } return ret; } /***************************************************************************** * If a read operation tries to access simd struct field, then transform the operation * to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree) { unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); GenTree* op2 = gtNewIconNode(index, TYP_INT); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } return tree; } /***************************************************************************** * Transform an assignment of a SIMD struct field to SimdWithElementNode, and * return a new tree. If it is not such an assignment, then return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic set. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree) { assert(tree->OperGet() == GT_ASG); unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdType = simdStructNode->gtType; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); GenTree* op2 = gtNewIconNode(index, TYP_INT); GenTree* op3 = tree->gtGetOp2(); NamedIntrinsic intrinsicId = NI_Vector128_WithElement; GenTree* target = gtClone(simdStructNode); assert(target != nullptr); GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); tree->AsOp()->gtOp1 = target; tree->AsOp()->gtOp2 = simdTree; // fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source // and target have not yet been morphed. // Therefore, in case the source and/or target are now implicit byrefs, we need to call it again. if (fgMorphImplicitByRefArgs(tree)) { if (tree->gtGetOp1()->OperIsBlk()) { assert(tree->gtGetOp1()->TypeGet() == simdType); tree->gtGetOp1()->SetOper(GT_IND); tree->gtGetOp1()->gtType = simdType; } } #ifdef DEBUG tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } return tree; } #endif // FEATURE_SIMD //------------------------------------------------------------------------------ // fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3" // for commutative operators. 
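// e.g. "(x + 2) + 3" is folded to "x + 5", including when the inner op is buried under a COMMA.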
// // Arguments: // tree - node to fold // // return value: // A folded GenTree* instance or nullptr if something prevents folding. // GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree) { assert(varTypeIsIntegralOrI(tree->TypeGet())); assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR)); // op1 can be GT_COMMA, in this case we're going to fold // "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))" GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true); genTreeOps oper = tree->OperGet(); if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() || op1->gtGetOp1()->IsCnsIntOrI()) { return nullptr; } if (!fgGlobalMorph && (op1 != tree->gtGetOp1())) { // Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1))) // don't run the optimization for such trees outside of global morph. // Otherwise, there is a chance of violating VNs invariants and/or modifying a tree // that is an active CSE candidate. return nullptr; } if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1)) { // The optimization removes 'tree' from IR and changes the value of 'op1'. return nullptr; } if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow())) { return nullptr; } GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon(); GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon(); if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet())) { return nullptr; } if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2)) { // The optimization removes 'cns2' from IR and changes the value of 'cns1'. return nullptr; } GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2)); if (!folded->IsCnsIntOrI()) { // Give up if we can't fold "C1 op C2" return nullptr; } auto foldedCns = folded->AsIntCon(); cns1->SetIconValue(foldedCns->IconValue()); cns1->SetVNsFromNode(foldedCns); cns1->gtFieldSeq = foldedCns->gtFieldSeq; op1 = tree->gtGetOp1(); op1->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(cns2); DEBUG_DESTROY_NODE(foldedCns); INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1->AsOp(); } //------------------------------------------------------------------------------ // fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)". // // Arguments: // tree - node to fold // // Return Value: // A folded GenTree* instance, or nullptr if it couldn't be folded GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree) { // This transform does not preserve VNs and deletes a node. assert(fgGlobalMorph); assert(varTypeIsIntegralOrI(tree)); assert(tree->OperIs(GT_OR, GT_AND, GT_XOR)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); // see whether both ops are casts, with matching to and from types. if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST)) { // bail if either operand is a checked cast if (op1->gtOverflow() || op2->gtOverflow()) { return nullptr; } var_types fromType = op1->AsCast()->CastOp()->TypeGet(); var_types toType = op1->AsCast()->CastToType(); bool isUnsigned = op1->IsUnsigned(); if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) || (op2->IsUnsigned() != isUnsigned)) { return nullptr; } /* // Reuse gentree nodes: // // tree op1 // / \ | // op1 op2 ==> tree // | | / \. 
// x y x y // // (op2 becomes garbage) */ tree->gtOp1 = op1->AsCast()->CastOp(); tree->gtOp2 = op2->AsCast()->CastOp(); tree->gtType = genActualType(fromType); op1->gtType = genActualType(toType); op1->AsCast()->gtOp1 = tree; op1->AsCast()->CastToType() = toType; op1->SetAllEffectsFlags(tree); // no need to update isUnsigned DEBUG_DESTROY_NODE(op2); INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1; } return nullptr; } /***************************************************************************** * * Transform the given GTK_SMPOP tree for code generation. */ #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) { ALLOCA_CHECK(); assert(tree->OperKind() & GTK_SMPOP); /* The steps in this function are : o Perform required preorder processing o Process the first, then second operand, if any o Perform required postorder morphing o Perform optional postorder morphing if optimizing */ bool isQmarkColon = false; AssertionIndex origAssertionCount = DUMMY_INIT(0); AssertionDsc* origAssertionTab = DUMMY_INIT(NULL); AssertionIndex thenAssertionCount = DUMMY_INIT(0); AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL); if (fgGlobalMorph) { tree = fgMorphForRegisterFP(tree); } genTreeOps oper = tree->OperGet(); var_types typ = tree->TypeGet(); GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2IfPresent(); /*------------------------------------------------------------------------- * First do any PRE-ORDER processing */ switch (oper) { // Some arithmetic operators need to use a helper call to the EE int helper; case GT_ASG: tree = fgDoNormalizeOnStore(tree); /* fgDoNormalizeOnStore can change op2 */ noway_assert(op1 == tree->AsOp()->gtOp1); op2 = tree->AsOp()->gtOp2; #ifdef FEATURE_SIMD if (IsBaselineSimdIsaSupported()) { // We should check whether op2 should be assigned to a SIMD field or not. // If it is, we should tranlate the tree to simd intrinsic. assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0)); GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree); typ = tree->TypeGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); #ifdef DEBUG assert((tree == newTree) && (tree->OperGet() == oper)); if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0) { tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; } #endif // DEBUG } #endif // We can't CSE the LHS of an assignment. Only r-values can be CSEed. // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former // behavior, allow CSE'ing if is a struct type (or a TYP_REF transformed from a struct type) // TODO-1stClassStructs: improve this. if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_ADDR: /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */ op1->gtFlags |= GTF_DONT_CSE; break; case GT_QMARK: case GT_JTRUE: noway_assert(op1); if (op1->OperIsCompare()) { /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does not need to materialize the result as a 0 or 1. */ /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } else { GenTree* effOp1 = op1->gtEffectiveVal(); noway_assert((effOp1->gtOper == GT_CNS_INT) && (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1))); } break; case GT_COLON: if (optLocalAssertionProp) { isQmarkColon = true; } break; case GT_FIELD: return fgMorphField(tree, mac); case GT_INDEX: return fgMorphArrayIndex(tree); case GT_CAST: { GenTree* morphedCast = fgMorphExpandCast(tree->AsCast()); if (morphedCast != nullptr) { return morphedCast; } op1 = tree->AsCast()->CastOp(); } break; case GT_MUL: noway_assert(op2 != nullptr); if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow()) { // MUL(NEG(a), C) => MUL(a, NEG(C)) if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() && !op2->IsIconHandle()) { GenTree* newOp1 = op1->gtGetOp1(); GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet()); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); tree->AsOp()->gtOp1 = newOp1; tree->AsOp()->gtOp2 = newConst; return fgMorphSmpOp(tree, mac); } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { // For (long)int1 * (long)int2, we dont actually do the // casts, and just multiply the 32 bit values, which will // give us the 64 bit result in edx:eax. if (tree->Is64RsltMul()) { // We are seeing this node again. // Morph only the children of casts, // so as to avoid losing them. tree = fgMorphLongMul(tree->AsOp()); goto DONE_MORPHING_CHILDREN; } tree = fgRecognizeAndMorphLongMul(tree->AsOp()); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->AsOp()->gtGetOp2(); if (tree->Is64RsltMul()) { goto DONE_MORPHING_CHILDREN; } else { if (tree->gtOverflow()) helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF; else helper = CORINFO_HELP_LMUL; goto USE_HELPER_FOR_ARITH; } } #endif // !TARGET_64BIT break; case GT_ARR_LENGTH: if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return iconNode; } } break; case GT_DIV: // Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two. 
                // Powers of two within range are always exactly represented,
                // so multiplication by the reciprocal is safe in this scenario,
                // e.g. "x / 8.0" becomes "x * 0.125".
                if (fgGlobalMorph && op2->IsCnsFltOrDbl())
                {
                    double divisor = op2->AsDblCon()->gtDconVal;
                    if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
                        ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
                    {
                        oper = GT_MUL;
                        tree->ChangeOper(oper);
                        op2->AsDblCon()->gtDconVal = 1.0 / divisor;
                    }
                }

                // Convert DIV to UDIV if both op1 and op2 are known to be never negative
                if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                    op2->IsNeverNegative(this))
                {
                    assert(tree->OperIs(GT_DIV));
                    tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
                    return fgMorphSmpOp(tree, mac);
                }

#ifndef TARGET_64BIT
                if (typ == TYP_LONG)
                {
                    helper = CORINFO_HELP_LDIV;
                    goto USE_HELPER_FOR_ARITH;
                }

#if USE_HELPERS_FOR_INT_DIV
                if (typ == TYP_INT)
                {
                    helper = CORINFO_HELP_DIV;
                    goto USE_HELPER_FOR_ARITH;
                }
#endif
#endif // !TARGET_64BIT
                break;

            case GT_UDIV:

#ifndef TARGET_64BIT
                if (typ == TYP_LONG)
                {
                    helper = CORINFO_HELP_ULDIV;
                    goto USE_HELPER_FOR_ARITH;
                }

#if USE_HELPERS_FOR_INT_DIV
                if (typ == TYP_INT)
                {
                    helper = CORINFO_HELP_UDIV;
                    goto USE_HELPER_FOR_ARITH;
                }
#endif
#endif // TARGET_64BIT
                break;

            case GT_MOD:

                if (varTypeIsFloating(typ))
                {
                    helper = CORINFO_HELP_DBLREM;
                    noway_assert(op2);
                    if (op1->TypeGet() == TYP_FLOAT)
                    {
                        if (op2->TypeGet() == TYP_FLOAT)
                        {
                            helper = CORINFO_HELP_FLTREM;
                        }
                        else
                        {
                            tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
                        }
                    }
                    else if (op2->TypeGet() == TYP_FLOAT)
                    {
                        tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
                    }
                    goto USE_HELPER_FOR_ARITH;
                }

                // Convert MOD to UMOD if both op1 and op2 are known to be never negative
                if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                    op2->IsNeverNegative(this))
                {
                    assert(tree->OperIs(GT_MOD));
                    tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
                    return fgMorphSmpOp(tree, mac);
                }

                // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
                // A similar optimization for signed mod will not work for a negative perfectly divisible
                // HI-word. To make it correct, we would need to divide without the sign and then flip the
                // result sign after mod. This requires 18 opcodes + flow, making it not worth inlining.
                goto ASSIGN_HELPER_FOR_MOD;

            case GT_UMOD:

#ifdef TARGET_ARMARCH
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
#else  // TARGET_XARCH
                // If this is an unsigned long mod with a constant divisor,
                // then don't morph to a helper call - it can be done faster inline using idiv.

                noway_assert(op2);
                if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
                {
                    if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
                        op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
                    {
                        tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
                        noway_assert(op1->TypeIs(TYP_LONG));

                        // Update flags for op1 morph.
                        tree->gtFlags &= ~GTF_ALL_EFFECT;

                        // Only update with op1 as op2 is a constant.
                        tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);

                        // If op1 is a constant, then do constant folding of the division operator.
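                        // e.g. if morphing op1 produced a constant, "CNS(100UL) % CNS(7UL)"
                        // folds to "CNS(2UL)" right here instead of reaching codegen.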
if (op1->OperIs(GT_CNS_NATIVELONG)) { tree = gtFoldExpr(tree); } if (!tree->OperIsConst()) { tree->AsOp()->CheckDivideByConstOptimized(this); } return tree; } } #endif // TARGET_XARCH ASSIGN_HELPER_FOR_MOD: // For "val % 1", return 0 if op1 doesn't have any side effects // and we are not in the CSE phase, we cannot discard 'tree' // because it may contain CSE expressions that we haven't yet examined. // if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase) { if (op2->IsIntegralConst(1)) { GenTree* zeroNode = gtNewZeroConNode(typ); #ifdef DEBUG zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif DEBUG_DESTROY_NODE(tree); return zeroNode; } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD; goto USE_HELPER_FOR_ARITH; } #if USE_HELPERS_FOR_INT_DIV if (typ == TYP_INT) { if (oper == GT_UMOD) { helper = CORINFO_HELP_UMOD; goto USE_HELPER_FOR_ARITH; } else if (oper == GT_MOD) { helper = CORINFO_HELP_MOD; goto USE_HELPER_FOR_ARITH; } } #endif #endif // !TARGET_64BIT #ifdef TARGET_ARM64 // For ARM64 we don't have a remainder instruction, // The architecture manual suggests the following transformation to // generate code for such operator: // // a % b = a - (a / b) * b; // // TODO: there are special cases where it can be done better, for example // when the modulo operation is unsigned and the divisor is a // integer constant power of two. In this case, we can make the transform: // // a % b = a & (b - 1); // // Lower supports it for all cases except when `a` is constant, but // in Morph we can't guarantee that `a` won't be transformed into a constant, // so can't guarantee that lower will be able to do this optimization. { // Do "a % b = a - (a / b) * b" morph always, see TODO before this block. bool doMorphModToSubMulDiv = true; if (doMorphModToSubMulDiv) { assert(!optValnumCSE_phase); tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #else // !TARGET_ARM64 // If b is not a power of 2 constant then lowering replaces a % b // with a - (a / b) * b and applies magic division optimization to // a / b. The code may already contain an a / b expression (e.g. // x = a / 10; y = a % 10;) and then we end up with redundant code. // If we convert % to / here we give CSE the opportunity to eliminate // the redundant division. If there's no redundant division then // nothing is lost, lowering would have done this transform anyway. if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst())) { ssize_t divisorValue = op2->AsIntCon()->IconValue(); size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue)); if (!isPow2(absDivisorValue)) { tree = fgMorphModToSubMulDiv(tree->AsOp()); op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; } } #endif // !TARGET_ARM64 break; USE_HELPER_FOR_ARITH: { // TODO: this comment is wrong now, do an appropriate fix. /* We have to morph these arithmetic operations into helper calls before morphing the arguments (preorder), else the arguments won't get correct values of fgPtrArgCntCur. However, try to fold the tree first in case we end up with a simple node which won't need a helper call at all */ noway_assert(tree->OperIsBinary()); GenTree* oldTree = tree; tree = gtFoldExpr(tree); // Were we able to fold it ? // Note that gtFoldExpr may return a non-leaf even if successful // e.g. 
for something like "expr / 1" - see also bug #290853 if (tree->OperIsLeaf() || (oldTree != tree)) { return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree); } // Did we fold it into a comma node with throw? if (tree->gtOper == GT_COMMA) { noway_assert(fgIsCommaThrow(tree)); return fgMorphTree(tree); } } return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2)); case GT_RETURN: if (!tree->TypeIs(TYP_VOID)) { if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND)) { op1 = fgMorphRetInd(tree->AsUnOp()); } if (op1->OperIs(GT_LCL_VAR)) { // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)` // and `ASG` will be tranformed into field by field copy without parent local referencing if // possible. GenTreeLclVar* lclVar = op1->AsLclVar(); unsigned lclNum = lclVar->GetLclNum(); if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum)) { LclVarDsc* varDsc = lvaGetDesc(lclVar); if (varDsc->CanBeReplacedWithItsField(this)) { // We can replace the struct with its only field and allow copy propagation to replace // return value that was written as a field. unsigned fieldLclNum = varDsc->lvFieldLclStart; LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field " "V%02u for " "the return [%06u]\n", lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree)); lclVar->SetLclNum(fieldLclNum); lclVar->ChangeType(fieldDsc->lvType); } } } } // normalize small integer return values if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) && fgCastNeeded(op1, info.compRetType)) { // Small-typed return values are normalized by the callee op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType); // Propagate GTF_COLON_COND op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND); tree->AsOp()->gtOp1 = fgMorphTree(op1); // Propagate side effect flags tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1()); return tree; } break; case GT_EQ: case GT_NE: { GenTree* optimizedTree = gtFoldTypeCompare(tree); if (optimizedTree != tree) { return fgMorphTree(optimizedTree); } // Pattern-matching optimization: // (a % c) ==/!= 0 // for power-of-2 constant `c` // => // a & (c - 1) ==/!= 0 // For integer `a`, even if negative. 
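            //
            // e.g. "(a % 8) == 0" becomes "(a & 7) == 0", replacing the division with a
            // single AND; in two's complement the zero test gives the same answer even
            // for negative 'a'.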
if (opts.OptimizationEnabled() && !optValnumCSE_phase) { assert(tree->OperIs(GT_EQ, GT_NE)); if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0)) { GenTree* op1op2 = op1->AsOp()->gtOp2; if (op1op2->IsCnsIntOrI()) { const ssize_t modValue = op1op2->AsIntCon()->IconValue(); if (isPow2(modValue)) { JITDUMP("\nTransforming:\n"); DISPTREE(tree); op1->SetOper(GT_AND); // Change % => & op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1 fgUpdateConstTreeValueNumber(op1op2); JITDUMP("\ninto:\n"); DISPTREE(tree); } } } } } FALLTHROUGH; case GT_GT: { // Try and optimize nullable boxes feeding compares GenTree* optimizedTree = gtFoldBoxNullable(tree); if (optimizedTree->OperGet() != tree->OperGet()) { return optimizedTree; } else { tree = optimizedTree; } op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); break; } case GT_RUNTIMELOOKUP: return fgMorphTree(op1); #ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round) { switch (tree->TypeGet()) { case TYP_DOUBLE: return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1)); case TYP_FLOAT: return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1)); default: unreached(); } } break; #endif case GT_PUTARG_TYPE: return fgMorphTree(tree->AsUnOp()->gtGetOp1()); case GT_NULLCHECK: { op1 = tree->AsUnOp()->gtGetOp1(); if (op1->IsCall()) { GenTreeCall* const call = op1->AsCall(); if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd))) { JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call)); // TODO: Can we also remove the call? // return fgMorphTree(call); } } } break; default: break; } if (opts.OptimizationEnabled() && fgGlobalMorph) { GenTree* morphed = fgMorphReduceAddOps(tree); if (morphed != tree) return fgMorphTree(morphed); } /*------------------------------------------------------------------------- * Process the first operand, if any */ if (op1) { // If we are entering the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); origAssertionTab = (AssertionDsc*)ALLOCA(tabSize); origAssertionCount = optAssertionCount; memcpy(origAssertionTab, optAssertionTabPrivate, tabSize); } else { origAssertionCount = 0; origAssertionTab = nullptr; } } // We might need a new MorphAddressContext context. (These are used to convey // parent context about how addresses being calculated will be used; see the // specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. MorphAddrContext subIndMac1(MACK_Ind); MorphAddrContext* subMac1 = mac; if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind) { switch (tree->gtOper) { case GT_ADDR: // A non-null mac here implies this node is part of an address computation. // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; subMac1->m_kind = MACK_Addr; } break; case GT_COMMA: // In a comma, the incoming context only applies to the rightmost arg of the // comma list. The left arg (op1) gets a fresh context. 
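                    // e.g. for IND(COMMA(store, addr)), only 'addr' feeds the enclosing
                    // indirection, so 'store' must not inherit the parent's address context.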
subMac1 = nullptr; break; case GT_OBJ: case GT_BLK: case GT_IND: // A non-null mac here implies this node is part of an address computation (the tree parent is // GT_ADDR). // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; } break; default: break; } } // For additions, if we're in an IND context keep track of whether // all offsets added to the address are constant, and their sum. if (tree->gtOper == GT_ADD && subMac1 != nullptr) { assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock. GenTree* otherOp = tree->AsOp()->gtOp2; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset); totalOffset += otherOp->AsIntConCommon()->IconValue(); if (totalOffset.IsOverflow()) { // We will consider an offset so large as to overflow as "not a constant" -- // we will do a null check. subMac1->m_allConstantOffsets = false; } else { subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } } else { subMac1->m_allConstantOffsets = false; } } // If op1 is a GT_FIELD or indir, we need to pass down the mac if // its parent is GT_ADDR, since the address of op1 // is part of an ongoing address computation. Otherwise // op1 represents the value of the field and so any address // calculations it does are in a new context. if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR)) { subMac1 = nullptr; // The impact of op1's value to any ongoing // address computation is handled below when looking // at op2. } tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1); // If we are exiting the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can merge this state with the "else" part exit if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize); thenAssertionCount = optAssertionCount; memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize); } else { thenAssertionCount = 0; thenAssertionTab = nullptr; } } /* Morphing along with folding and inlining may have changed the * side effect flags, so we have to reset them * * NOTE: Don't reset the exception flags on nodes that may throw */ assert(tree->gtOper != GT_CALL); if (!tree->OperRequiresCallFlag(this)) { tree->gtFlags &= ~GTF_CALL; } /* Propagate the new flags */ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does // Similarly for clsVar if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR)) { tree->gtFlags &= ~GTF_GLOB_REF; } } // if (op1) /*------------------------------------------------------------------------- * Process the second operand, if any */ if (op2) { // If we are entering the "else" part of a Qmark-Colon we must // reset the state of the current copy assignment table if (isQmarkColon) { noway_assert(optLocalAssertionProp); optAssertionReset(0); if (origAssertionCount) { size_t tabSize = origAssertionCount * sizeof(AssertionDsc); memcpy(optAssertionTabPrivate, origAssertionTab, tabSize); optAssertionReset(origAssertionCount); } } // We might need a new MorphAddressContext context to use in evaluating op2. 
// (These are used to convey parent context about how addresses being calculated // will be used; see the specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. switch (tree->gtOper) { case GT_ADD: if (mac != nullptr && mac->m_kind == MACK_Ind) { GenTree* otherOp = tree->AsOp()->gtOp1; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } else { mac->m_allConstantOffsets = false; } } break; default: break; } // If op2 is a GT_FIELD or indir, we must be taking its value, // so it should evaluate its address in a new context. if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir()) { // The impact of op2's value to any ongoing // address computation is handled above when looking // at op1. mac = nullptr; } tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac); /* Propagate the side effect flags from op2 */ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT); // If we are exiting the "else" part of a Qmark-Colon we must // merge the state of the current copy assignment table with // that of the exit of the "then" part. if (isQmarkColon) { noway_assert(optLocalAssertionProp); // If either exit table has zero entries then // the merged table also has zero entries if (optAssertionCount == 0 || thenAssertionCount == 0) { optAssertionReset(0); } else { size_t tabSize = optAssertionCount * sizeof(AssertionDsc); if ((optAssertionCount != thenAssertionCount) || (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set // Iterate over the copy asgn table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) { AssertionDsc* curAssertion = optGetAssertion(index); for (unsigned j = 0; j < thenAssertionCount; j++) { AssertionDsc* thenAssertion = &thenAssertionTab[j]; // Do the left sides match? if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) && (curAssertion->assertionKind == thenAssertion->assertionKind)) { // Do the right sides match? 
if ((curAssertion->op2.kind == thenAssertion->op2.kind) && (curAssertion->op2.lconVal == thenAssertion->op2.lconVal)) { goto KEEP; } else { goto REMOVE; } } } // // If we fall out of the loop above then we didn't find // any matching entry in the thenAssertionTab so it must // have been killed on that path so we remove it here // REMOVE: // The data at optAssertionTabPrivate[i] is to be removed CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("The QMARK-COLON "); printTreeID(tree); printf(" removes assertion candidate #%d\n", index); } #endif optAssertionRemove(index); continue; KEEP: // The data at optAssertionTabPrivate[i] is to be kept index++; } } } } } // if (op2) #ifndef TARGET_64BIT DONE_MORPHING_CHILDREN: #endif // !TARGET_64BIT if (tree->OperIsIndirOrArrLength()) { tree->SetIndirExceptionFlags(this); } else { if (tree->OperMayThrow(this)) { // Mark the tree node as potentially throwing an exception tree->gtFlags |= GTF_EXCEPT; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0))) { tree->gtFlags &= ~GTF_EXCEPT; } } } if (tree->OperRequiresAsgFlag()) { tree->gtFlags |= GTF_ASG; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0))) { tree->gtFlags &= ~GTF_ASG; } } if (tree->OperRequiresCallFlag(this)) { tree->gtFlags |= GTF_CALL; } else { if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) && ((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0))) { tree->gtFlags &= ~GTF_CALL; } } /*------------------------------------------------------------------------- * Now do POST-ORDER processing */ if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet()))) { // The tree is really not GC but was marked as such. Now that the // children have been unmarked, unmark the tree too. // Remember that GT_COMMA inherits it's type only from op2 if (tree->gtOper == GT_COMMA) { tree->gtType = genActualType(op2->TypeGet()); } else { tree->gtType = genActualType(op1->TypeGet()); } } GenTree* oldTree = tree; GenTree* qmarkOp1 = nullptr; GenTree* qmarkOp2 = nullptr; if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON)) { qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1; qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2; } // Try to fold it, maybe we get lucky, tree = gtFoldExpr(tree); if (oldTree != tree) { /* if gtFoldExpr returned op1 or op2 then we are done */ if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2)) { return tree; } /* If we created a comma-throw tree then we need to morph op1 */ if (fgIsCommaThrow(tree)) { tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1); fgMorphTreeDone(tree); return tree; } return tree; } else if (tree->OperIsConst()) { return tree; } /* gtFoldExpr could have used setOper to change the oper */ oper = tree->OperGet(); typ = tree->TypeGet(); /* gtFoldExpr could have changed op1 and op2 */ op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); // Do we have an integer compare operation? // if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet())) { // Are we comparing against zero? // if (op2->IsIntegralConst(0)) { // Request that the codegen for op1 sets the condition flags // when it generates the code for op1. // // Codegen for op1 must set the condition flags if // this method returns true. 
// op1->gtRequestSetFlags(); } } /*------------------------------------------------------------------------- * Perform the required oper-specific postorder morphing */ GenTree* temp; size_t ival1; GenTree* lclVarTree; GenTree* effectiveOp1; FieldSeqNode* fieldSeq = nullptr; switch (oper) { case GT_ASG: if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0)) { op1->gtFlags &= ~GTF_VAR_FOLDED_IND; tree = fgDoNormalizeOnStore(tree); op2 = tree->gtGetOp2(); } lclVarTree = fgIsIndirOfAddrOfLocal(op1); if (lclVarTree != nullptr) { lclVarTree->gtFlags |= GTF_VAR_DEF; } effectiveOp1 = op1->gtEffectiveVal(); // If we are storing a small type, we might be able to omit a cast. if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1)) { if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) && varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow()) { var_types castType = op2->CastToType(); // If we are performing a narrowing cast and // castType is larger or the same as op1's type // then we can discard the cast. if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1))) { tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp(); } } } fgAssignSetVarDef(tree); /* We can't CSE the LHS of an assignment */ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_CAST: tree = fgOptimizeCast(tree->AsCast()); if (!tree->OperIsSimple()) { return tree; } if (tree->OperIs(GT_CAST) && tree->gtOverflow()) { fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_EQ: case GT_NE: // It is not safe to reorder/delete CSE's if (!optValnumCSE_phase && op2->IsIntegralConst()) { tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp()); assert(tree->OperIsCompare()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } goto COMPARE; case GT_LT: case GT_LE: case GT_GE: case GT_GT: if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))) { tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } // op2's value may be changed, so it cannot be a CSE candidate. if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2)) { tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp()); oper = tree->OperGet(); assert(op1 == tree->AsOp()->gtGetOp1()); assert(op2 == tree->AsOp()->gtGetOp2()); } COMPARE: noway_assert(tree->OperIsCompare()); break; case GT_MUL: #ifndef TARGET_64BIT if (typ == TYP_LONG) { // This must be GTF_MUL_64RSLT INDEBUG(tree->AsOp()->DebugCheckLongMul()); return tree; } #endif // TARGET_64BIT goto CM_OVF_OP; case GT_SUB: if (tree->gtOverflow()) { goto CM_OVF_OP; } // TODO #4104: there are a lot of other places where // this condition is not checked before transformations. if (fgGlobalMorph) { /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */ noway_assert(op2); if (op2->IsCnsIntOrI() && !op2->IsIconHandle()) { // Negate the constant and change the node to be "+", // except when `op2` is a const byref. 
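                    // e.g. "x - 4" becomes "x + (-4)", exposing the tree to the commutative
                    // morphing done under CM_ADD_OP below.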
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue()); op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField(); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */ noway_assert(op1); if (op1->IsCnsIntOrI()) { noway_assert(varTypeIsIntOrI(tree)); // The type of the new GT_NEG node cannot just be op2->TypeGet(). // Otherwise we may sign-extend incorrectly in cases where the GT_NEG // node ends up feeding directly into a cast, for example in // GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte)) tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2); fgMorphTreeDone(op2); oper = GT_ADD; tree->ChangeOper(oper); goto CM_ADD_OP; } /* No match - exit */ } // Skip optimization if non-NEG operand is constant. // Both op1 and op2 are not constant because it was already checked above. if (opts.OptimizationEnabled() && fgGlobalMorph) { // a - -b = > a + b // SUB(a, (NEG(b)) => ADD(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { // tree: SUB // op1: a // op2: NEG // op2Child: b GenTree* op2Child = op2->AsOp()->gtOp1; // b oper = GT_ADD; tree->SetOper(oper, GenTree::PRESERVE_VN); tree->AsOp()->gtOp2 = op2Child; DEBUG_DESTROY_NODE(op2); op2 = op2Child; } // -a - -b = > b - a // SUB(NEG(a), (NEG(b)) => SUB(b, a) else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2)) { // tree: SUB // op1: NEG // op1Child: a // op2: NEG // op2Child: b GenTree* op1Child = op1->AsOp()->gtOp1; // a GenTree* op2Child = op2->AsOp()->gtOp1; // b tree->AsOp()->gtOp1 = op2Child; tree->AsOp()->gtOp2 = op1Child; DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); op1 = op2Child; op2 = op1Child; } } break; #ifdef TARGET_ARM64 case GT_DIV: if (!varTypeIsFloating(tree->gtType)) { // Codegen for this instruction needs to be able to throw two exceptions: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); } break; case GT_UDIV: // Codegen for this instruction needs to be able to throw one exception: fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO); break; #endif case GT_ADD: CM_OVF_OP: if (tree->gtOverflow()) { tree->gtRequestSetFlags(); // Add the excptn-throwing basic block to jump to on overflow fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); // We can't do any commutative morphing for overflow instructions break; } CM_ADD_OP: FALLTHROUGH; case GT_OR: case GT_XOR: case GT_AND: tree = fgOptimizeCommutativeArithmetic(tree->AsOp()); if (!tree->OperIsSimple()) { return tree; } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_NOT: case GT_NEG: // Remove double negation/not. // Note: this is not a safe tranformation if "tree" is a CSE candidate. // Consider for example the following expression: NEG(NEG(OP)), where any // NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find // the original NEG in the statement. 
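            // e.g. "NEG(NEG(x))" and "NOT(NOT(x))" both reduce to just "x" when none of the
            // nodes involved is an active CSE candidate.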
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) && !gtIsActiveCSE_Candidate(op1)) { JITDUMP("Remove double negation/not\n") GenTree* op1op1 = op1->gtGetOp1(); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op1op1; } // Distribute negation over simple multiplication/division expressions if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) && op1->OperIs(GT_MUL, GT_DIV)) { GenTreeOp* mulOrDiv = op1->AsOp(); GenTree* op1op1 = mulOrDiv->gtGetOp1(); GenTree* op1op2 = mulOrDiv->gtGetOp2(); if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle()) { // NEG(MUL(a, C)) => MUL(a, -C) // NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1} ssize_t constVal = op1op2->AsIntCon()->IconValue(); if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) || (mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow())) { GenTree* newOp1 = op1op1; // a GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C mulOrDiv->gtOp1 = newOp1; mulOrDiv->gtOp2 = newOp2; mulOrDiv->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1op2); return mulOrDiv; } } } /* Any constant cases should have been folded earlier */ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase); break; case GT_CKFINITE: noway_assert(varTypeIsFloating(op1->TypeGet())); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN); break; case GT_BOUNDS_CHECK: fgSetRngChkTarget(tree); break; case GT_OBJ: case GT_BLK: case GT_IND: { // If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on // the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X // is a local or CLS_VAR, even if it has been address-exposed. if (op1->OperIs(GT_ADDR)) { tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF); } if (!tree->OperIs(GT_IND)) { break; } // Can not remove a GT_IND if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } bool foldAndReturnTemp = false; temp = nullptr; ival1 = 0; // Don't remove a volatile GT_IND, even if the address points to a local variable. if ((tree->gtFlags & GTF_IND_VOLATILE) == 0) { /* Try to Fold *(&X) into X */ if (op1->gtOper == GT_ADDR) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } temp = op1->AsOp()->gtOp1; // X // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that // they are the *same* struct type. In fact, they almost certainly aren't. If the // address has an associated field sequence, that identifies this case; go through // the "lcl_fld" path rather than this one. FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below. 
if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq)) { foldAndReturnTemp = true; } else if (temp->OperIsLocal()) { unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0)) { noway_assert(varTypeIsStruct(varDsc)); // We will try to optimize when we have a single field struct that is being struct promoted if (varDsc->lvFieldCnt == 1) { unsigned lclNumFld = varDsc->lvFieldLclStart; // just grab the promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld); // Also make sure that the tree type matches the fieldVarType and that it's lvFldOffset // is zero if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0)) { // We can just use the existing promoted field LclNum temp->AsLclVarCommon()->SetLclNum(lclNumFld); temp->gtType = fieldVarDsc->TypeGet(); foldAndReturnTemp = true; } } } // If the type of the IND (typ) is a "small int", and the type of the local has the // same width, then we can reduce to just the local variable -- it will be // correctly normalized. // // The below transformation cannot be applied if the local var needs to be normalized on load. else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) && !lvaTable[lclNum].lvNormalizeOnLoad()) { const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0; const bool possiblyStore = !definitelyLoad; if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ))) { typ = temp->TypeGet(); tree->gtType = typ; foldAndReturnTemp = true; if (possiblyStore) { // This node can be on the left-hand-side of an assignment node. // Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore() // is called on its parent in post-order morph. temp->gtFlags |= GTF_VAR_FOLDED_IND; } } } // For matching types we can fold else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) && !lvaTable[lclNum].lvNormalizeOnLoad()) { tree->gtType = typ = temp->TypeGet(); foldAndReturnTemp = true; } else { // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e. // nullptr) assert(fieldSeq == nullptr); bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq); assert(b || fieldSeq == nullptr); if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD)) { // Append the field sequence, change the type. temp->AsLclFld()->SetFieldSeq( GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq)); temp->gtType = typ; foldAndReturnTemp = true; } } // Otherwise will will fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } else // !temp->OperIsLocal() { // We don't try to fold away the GT_IND/GT_ADDR for this case temp = nullptr; } } else if (op1->OperGet() == GT_ADD) { #ifdef TARGET_ARM // Check for a misalignment floating point indirection. 
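                // (A floating-point load whose byte offset is not a multiple of 4 may not be
                // naturally aligned on ARM, so such indirections get GTF_IND_UNALIGNED below.)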
if (varTypeIsFloating(typ)) { GenTree* addOp2 = op1->AsOp()->gtGetOp2(); if (addOp2->IsCnsIntOrI()) { ssize_t offset = addOp2->AsIntCon()->gtIconVal; if ((offset % emitTypeSize(TYP_FLOAT)) != 0) { tree->gtFlags |= GTF_IND_UNALIGNED; } } } #endif // TARGET_ARM /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */ if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT && opts.OptimizationEnabled()) { // No overflow arithmetic with pointers noway_assert(!op1->gtOverflow()); temp = op1->AsOp()->gtOp1->AsOp()->gtOp1; if (!temp->OperIsLocal()) { temp = nullptr; break; } // Can not remove the GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1)) { break; } ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal; fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq; // Does the address have an associated zero-offset field sequence? FieldSeqNode* addrFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq)) { fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq); } if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT) { noway_assert(!varTypeIsGC(temp->TypeGet())); foldAndReturnTemp = true; } else { // The emitter can't handle large offsets if (ival1 != (unsigned short)ival1) { break; } // The emitter can get confused by invalid offsets if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum())) { break; } } // Now we can fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } } } // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging: // - We may have a load of a local where the load has a different type than the local // - We may have a load of a local plus an offset // // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and // offset if doing so is legal. The only cases in which this transformation is illegal are if the load // begins before the local or if the load extends beyond the end of the local (i.e. if the load is // out-of-bounds w.r.t. the local). if ((temp != nullptr) && !foldAndReturnTemp) { assert(temp->OperIsLocal()); const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lclNum); const var_types tempTyp = temp->TypeGet(); const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK); const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp); // Make sure we do not enregister this lclVar. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); // If the size of the load is greater than the size of the lclVar, we cannot fold this access into // a lclFld: the access represented by an lclFld node must begin at or after the start of the // lclVar and must not extend beyond the end of the lclVar. if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize)) { GenTreeLclFld* lclFld; // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival' // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival' // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type. // if (temp->OperGet() == GT_LCL_FLD) { lclFld = temp->AsLclFld(); lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1)); lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq)); } else // We have a GT_LCL_VAR. 
{ assert(temp->OperGet() == GT_LCL_VAR); temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField". lclFld = temp->AsLclFld(); lclFld->SetLclOffs(static_cast<unsigned>(ival1)); if (fieldSeq != nullptr) { // If it does represent a field, note that. lclFld->SetFieldSeq(fieldSeq); } } temp->gtType = tree->gtType; foldAndReturnTemp = true; } } if (foldAndReturnTemp) { assert(temp != nullptr); assert(temp->TypeGet() == typ); assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR)); // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for // 'temp' because a GT_ADDR always marks it for its operand. temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE); if (op1->OperGet() == GT_ADD) { DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT } DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR DEBUG_DESTROY_NODE(tree); // GT_IND // If the result of the fold is a local var, we may need to perform further adjustments e.g. for // normalization. if (temp->OperIs(GT_LCL_VAR)) { #ifdef DEBUG // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear // and the node in question must have this bit set (as it has already been morphed). temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG const bool forceRemorph = true; temp = fgMorphLocalVar(temp, forceRemorph); #ifdef DEBUG // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function // returns. temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return temp; } // Only do this optimization when we are in the global optimizer. Doing this after value numbering // could result in an invalid value number for the newly generated GT_IND node. if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph) { // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)). // TBD: this transformation is currently necessary for correctness -- it might // be good to analyze the failures that result if we don't do this, and fix them // in other ways. Ideally, this should be optional. GenTree* commaNode = op1; GenTreeFlags treeFlags = tree->gtFlags; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS at // least. #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA) { commaNode = commaNode->AsOp()->gtOp2; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at // least. commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) & (GTF_ASG | GTF_CALL)); #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0; ArrayInfo arrInfo; if (wasArrIndex) { bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); assert(b); GetArrayInfoMap()->Remove(tree); } tree = op1; GenTree* addr = commaNode->AsOp()->gtOp2; // TODO-1stClassStructs: we often create a struct IND without a handle, fix it. 
op1 = gtNewIndir(typ, addr); // This is very conservative op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING; op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); if (wasArrIndex) { GetArrayInfoMap()->Set(op1, arrInfo); } #ifdef DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif commaNode->AsOp()->gtOp2 = op1; commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); return tree; } break; } case GT_ADDR: // Can not remove op1 if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } if (op1->OperGet() == GT_IND) { if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(IND(...)) == (...). GenTree* addr = op1->AsOp()->gtOp1; // If tree has a zero field sequence annotation, update the annotation // on addr node. FieldSeqNode* zeroFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq)) { fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq); } noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } } else if (op1->OperGet() == GT_OBJ) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(OBJ(...)) == (...). GenTree* addr = op1->AsObj()->Addr(); noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase) { // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)). // (Be sure to mark "z" as an l-value...) ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack)); for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2()) { commas.Push(comma); } GenTree* commaNode = commas.Top(); // The top-level addr might be annotated with a zeroOffset field. FieldSeqNode* zeroFieldSeq = nullptr; bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq); tree = op1; commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE; // If the node we're about to put under a GT_ADDR is an indirection, it // doesn't need to be materialized, since we only want the addressing mode. Because // of this, this GT_IND is not a faulting indirection and we don't have to extract it // as a side effect. GenTree* commaOp2 = commaNode->AsOp()->gtOp2; if (commaOp2->OperIsBlk()) { commaOp2->SetOper(GT_IND); } if (commaOp2->gtOper == GT_IND) { commaOp2->gtFlags |= GTF_IND_NONFAULTING; commaOp2->gtFlags &= ~GTF_EXCEPT; commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT); } op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2); if (isZeroOffset) { // Transfer the annotation to the new GT_ADDR node. fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq); } commaNode->AsOp()->gtOp2 = op1; // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform // might give op1 a type different from byref (like, say, native int). So now go back and give // all the comma nodes the type of op1. // TODO: the comma flag update below is conservative and can be improved. // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to // get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF). 
while (!commas.Empty()) { GenTree* comma = commas.Pop(); comma->gtType = op1->gtType; comma->gtFlags |= op1->gtFlags; #ifdef DEBUG comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif gtUpdateNodeSideEffects(comma); } return tree; } break; case GT_COLON: if (fgGlobalMorph) { /* Mark the nodes that are conditionally executed */ fgWalkTreePre(&tree, gtMarkColonCond); } /* Since we're doing this postorder we clear this if it got set by a child */ fgRemoveRestOfBlock = false; break; case GT_COMMA: /* Special case: trees that don't produce a value */ if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2)) { typ = tree->gtType = TYP_VOID; } // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. // if (!optValnumCSE_phase) { // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this // is all we need. GenTree* op1SideEffects = nullptr; // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example) // hoisted expressions in loops. gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE)); if (op1SideEffects) { // Replace the left hand side with the side effect list. op1 = op1SideEffects; tree->AsOp()->gtOp1 = op1SideEffects; gtUpdateNodeSideEffects(tree); } else { op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op2; } // If the right operand is just a void nop node, throw it away. Unless this is a // comma throw, in which case we want the top-level morphing loop to recognize it. if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree)) { op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op2); return op1; } } break; case GT_JTRUE: /* Special case if fgRemoveRestOfBlock is set to true */ if (fgRemoveRestOfBlock) { if (fgIsCommaThrow(op1, true)) { GenTree* throwNode = op1->AsOp()->gtOp1; JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n", dspTreeID(tree)); DEBUG_DESTROY_NODE(tree); return throwNode; } noway_assert(op1->OperIsCompare()); noway_assert(op1->gtFlags & GTF_EXCEPT); // We need to keep op1 for the side-effects. Hang it off // a GT_COMMA node JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree)); tree->ChangeOper(GT_COMMA); tree->AsOp()->gtOp2 = op2 = gtNewNothingNode(); // Additionally since we're eliminating the JTRUE // codegen won't like it if op1 is a RELOP of longs, floats or doubles. // So we change it into a GT_COMMA as well. 
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1)); op1->ChangeOper(GT_COMMA); op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop op1->gtType = op1->AsOp()->gtOp1->gtType; return tree; } break; case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant) { // Should be expanded by the time it reaches CSE phase assert(!optValnumCSE_phase); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to "); if (op1->OperIsConst()) { // We're lucky to catch a constant here while importer was not JITDUMP("true\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(1); } else { GenTree* op1SideEffects = nullptr; gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT); if (op1SideEffects != nullptr) { DEBUG_DESTROY_NODE(tree); // Keep side-effects of op1 tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0)); JITDUMP("false with side effects:\n") DISPTREE(tree); } else { JITDUMP("false\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(0); } } INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } break; default: break; } assert(oper == tree->gtOper); // Propagate comma throws. // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON)) { if ((op1 != nullptr) && fgIsCommaThrow(op1, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY); if (propagatedThrow != nullptr) { return propagatedThrow; } } if ((op2 != nullptr) && fgIsCommaThrow(op2, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT); if (propagatedThrow != nullptr) { return propagatedThrow; } } } /*------------------------------------------------------------------------- * Optional morphing is done if tree transformations is permitted */ if ((opts.compFlags & CLFLG_TREETRANS) == 0) { return tree; } tree = fgMorphSmpOpOptional(tree->AsOp()); return tree; } //------------------------------------------------------------------------ // fgOptimizeCast: Optimizes the supplied GT_CAST tree. // // Tries to get rid of the cast, its operand, the GTF_OVERFLOW flag, calls // calls "optNarrowTree". Called in post-order by "fgMorphSmpOp". // // Arguments: // tree - the cast tree to optimize // // Return Value: // The optimized tree (that can have any shape). // GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) { GenTree* src = cast->CastOp(); if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src)) { return cast; } // See if we can discard the cast. if (varTypeIsIntegral(cast) && varTypeIsIntegral(src)) { IntegralRange srcRange = IntegralRange::ForNode(src, this); IntegralRange noOvfRange = IntegralRange::ForCastInput(cast); if (noOvfRange.Contains(srcRange)) { // Casting between same-sized types is a no-op, // given we have proven this cast cannot overflow. if (genActualType(cast) == genActualType(src)) { return src; } cast->ClearOverflow(); cast->SetAllEffectsFlags(src); // Try and see if we can make this cast into a cheaper zero-extending version. if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive()) { cast->SetUnsigned(); } } // For checked casts, we're done. if (cast->gtOverflow()) { return cast; } var_types castToType = cast->CastToType(); // For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast. 
        if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) &&
            src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD))
        {
            // We're changing the type here so we need to update the VN;
            // in other cases we discard the cast without modifying src
            // so the VN doesn't change.
            src->ChangeType(castToType);
            src->SetVNsFromNode(cast);

            return src;
        }

        // Try to narrow the operand of the cast and discard the cast.
        if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) &&
            optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false))
        {
            optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true);

            // "optNarrowTree" may leave a dead cast behind.
            if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp())))
            {
                src = src->AsCast()->CastOp();
            }

            return src;
        }

        // Check for two consecutive casts; we may be able to discard the intermediate one.
        if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow())
        {
            var_types dstCastToType = castToType;
            var_types srcCastToType = src->AsCast()->CastToType();

            // CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X).
            // CAST(ushort <- CAST(short <- X)): CAST(ushort <- X).
            if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType)))
            {
                cast->CastOp() = src->AsCast()->CastOp();
                DEBUG_DESTROY_NODE(src);
            }
        }
    }

    return cast;
}

//------------------------------------------------------------------------
// fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns.
//
// Arguments:
//    cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant
//
// Return Value:
//    The optimized tree, "cmp" in case no optimizations were done.
//    Currently only returns relop trees.
//
GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
{
    assert(cmp->OperIs(GT_EQ, GT_NE));
    assert(cmp->gtGetOp2()->IsIntegralConst());
    assert(!optValnumCSE_phase);

    GenTree*             op1 = cmp->gtGetOp1();
    GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();

    // Check for "(expr +/- icon1) ==/!= (non-zero-icon2)".
    if (op2->IsCnsIntOrI() && (op2->IconValue() != 0))
    {
        // Since this can occur repeatedly we use a while loop.
        while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) &&
               !op1->gtOverflow())
        {
            // Got it; change "x + icon1 == icon2" to "x == icon2 - icon1".
            ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue();
            ssize_t op2Value = op2->IconValue();

            if (op1->OperIs(GT_ADD))
            {
                op2Value -= op1Value;
            }
            else
            {
                op2Value += op1Value;
            }

            op1 = op1->AsOp()->gtGetOp1();
            op2->SetIconValue(static_cast<int32_t>(op2Value));
        }

        cmp->gtOp1 = op1;
        fgUpdateConstTreeValueNumber(op2);
    }

    // Here we look for the following tree
    //
    //                        EQ/NE
    //                        /  \.
    //                      op1   CNS 0/1
    //
    if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1))
    {
        ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue());

        if (op1->OperIsCompare())
        {
            // Here we look for the following tree
            //
            //                        EQ/NE           ->      RELOP/!RELOP
            //                        /  \                       /    \.
            //                     RELOP  CNS 0/1
            //                     /   \.
            //
            // Note that we will remove/destroy the EQ/NE node and move
            // the RELOP up into its location.

            // Here we reverse the RELOP if necessary.
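            // For example (an illustrative sketch):
            //   NE(GT(a, b), 0) => GT(a, b)   (no reversal needed)
            //   EQ(GT(a, b), 0) => LE(a, b)   (the relop is reversed)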
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ))); if (reverse) { gtReverseCond(op1); } noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0); op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE); op1->SetVNsFromNode(cmp); DEBUG_DESTROY_NODE(cmp); return op1; } // // Now we check for a compare with the result of an '&' operator // // Here we look for the following transformation: // // EQ/NE EQ/NE // / \ / \. // AND CNS 0/1 -> AND CNS 0 // / \ / \. // RSZ/RSH CNS 1 x CNS (1 << y) // / \. // x CNS_INT +y if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH)) { GenTreeOp* andOp = op1->AsOp(); GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp(); if (!rshiftOp->gtGetOp2()->IsCnsIntOrI()) { goto SKIP; } ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue(); if (shiftAmount < 0) { goto SKIP; } if (!andOp->gtGetOp2()->IsIntegralConst(1)) { goto SKIP; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if (andOp->TypeIs(TYP_INT)) { if (shiftAmount > 31) { goto SKIP; } andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount)); // Reverse the condition if necessary. if (op2Value == 1) { gtReverseCond(cmp); op2->SetIconValue(0); } } else if (andOp->TypeIs(TYP_LONG)) { if (shiftAmount > 63) { goto SKIP; } andMask->SetLngValue(1ll << shiftAmount); // Reverse the cond if necessary if (op2Value == 1) { gtReverseCond(cmp); op2->SetLngValue(0); } } andOp->gtOp1 = rshiftOp->gtGetOp1(); DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2()); DEBUG_DESTROY_NODE(rshiftOp); } } SKIP: // Now check for compares with small constant longs that can be cast to int. // Note that we filter out negative values here so that the transformations // below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were // we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs. if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0)) { return cmp; } if (!op1->OperIs(GT_AND)) { // Another interesting case: cast from int. if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Simply make this into an integer comparison. cmp->gtOp1 = op1->AsCast()->CastOp(); op2->BashToConst(static_cast<int32_t>(op2->LngValue())); fgUpdateConstTreeValueNumber(op2); } return cmp; } // Now we perform the following optimization: // EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) => // EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT) // when the constants are sufficiently small. // This transform cannot preserve VNs. if (fgGlobalMorph) { assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND)); // Is the result of the mask effectively an INT? GenTreeOp* andOp = op1->AsOp(); if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG)) { return cmp; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if ((andMask->LngValue() >> 32) != 0) { return cmp; } // Now we narrow the first operand of AND to int. if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false)) { optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true); } else { andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT); } assert(andMask == andOp->gtGetOp2()); // Now replace the mask node. andMask->BashToConst(static_cast<int32_t>(andMask->LngValue())); // Now change the type of the AND node. andOp->ChangeType(TYP_INT); // Finally we replace the comparand. 
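        // For example (an illustrative sketch):
        //   EQ(AND(x long, CNS_LNG 0xFF), CNS_LNG 0) becomes
        //   EQ(AND(CAST(int <- x), CNS_INT 0xFF), CNS_INT 0).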
        op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
    }

    return cmp;
}

//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation.
//
// Recognizes comparisons against various constant operands and morphs
// them, if possible, into comparisons against zero.
//
// Arguments:
//    cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
//    The "cmp" tree, possibly with a modified oper.
//    The second operand's constant value may be modified as well.
//
// Assumptions:
//    The operands have been swapped so that any constants are on the right.
//    The second operand is an integral constant.
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
{
    assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
    assert(cmp->gtGetOp2()->IsIntegralConst());
    assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));

    GenTree*             op1 = cmp->gtGetOp1();
    GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();

    assert(genActualType(op1) == genActualType(op2));

    genTreeOps oper     = cmp->OperGet();
    int64_t    op2Value = op2->IntegralValue();

    if (op2Value == 1)
    {
        // Check for "expr >= 1".
        if (oper == GT_GE)
        {
            // Change to "expr != 0" for unsigned and "expr > 0" for signed.
            oper = cmp->IsUnsigned() ? GT_NE : GT_GT;
        }
        // Check for "expr < 1".
        else if (oper == GT_LT)
        {
            // Change to "expr == 0" for unsigned and "expr <= 0" for signed.
            oper = cmp->IsUnsigned() ? GT_EQ : GT_LE;
        }
    }
    // Check for "expr relop -1".
    else if (!cmp->IsUnsigned() && (op2Value == -1))
    {
        // Check for "expr <= -1".
        if (oper == GT_LE)
        {
            // Change to "expr < 0".
            oper = GT_LT;
        }
        // Check for "expr > -1".
        else if (oper == GT_GT)
        {
            // Change to "expr >= 0".
            oper = GT_GE;
        }
    }
    else if (cmp->IsUnsigned())
    {
        if ((oper == GT_LE) || (oper == GT_GT))
        {
            if (op2Value == 0)
            {
                // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
                // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
                // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
                // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare, it sometimes
                // occurs as a result of branch inversion.
                oper = (oper == GT_LE) ? GT_EQ : GT_NE;
                cmp->gtFlags &= ~GTF_UNSIGNED;
            }
            // LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0).
            else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) ||
                     ((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX)))
            {
                oper = (oper == GT_LE) ? GT_GE : GT_LT;
                cmp->gtFlags &= ~GTF_UNSIGNED;
            }
        }
    }

    if (!cmp->OperIs(oper))
    {
        // Keep the old ValueNumber for 'tree' as the new expr
        // will still compute the same value as before.
        cmp->SetOper(oper, GenTree::PRESERVE_VN);
        op2->SetIntegralValue(0);
        fgUpdateConstTreeValueNumber(op2);
    }

    return cmp;
}

#ifdef FEATURE_HW_INTRINSICS

//------------------------------------------------------------------------
// fgOptimizeHWIntrinsic: optimize a HW intrinsic node
//
// Arguments:
//    node - HWIntrinsic node to examine
//
// Returns:
//    The original node if no optimization happened or if tree bashing occurred.
//    An alternative tree if an optimization happened.
//
// Notes:
//    Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create,
//    and if the call is one of these, attempt to optimize.
//    This is post-order, meaning that it will not morph the children.
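//
//    For example (an illustrative sketch):
//      Vector128.Create(0, 0, 0, 0) => Vector128<int>.get_Zero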
// GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { assert(!optValnumCSE_phase); if (opts.OptimizationDisabled()) { return node; } switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARM64) case NI_Vector64_Create: #endif { bool hwAllArgsAreConstZero = true; for (GenTree* arg : node->Operands()) { if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero()) { hwAllArgsAreConstZero = false; break; } } if (hwAllArgsAreConstZero) { switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: { node->ResetHWIntrinsicId(NI_Vector128_get_Zero); break; } #if defined(TARGET_XARCH) case NI_Vector256_Create: { node->ResetHWIntrinsicId(NI_Vector256_get_Zero); break; } #elif defined(TARGET_ARM64) case NI_Vector64_Create: { node->ResetHWIntrinsicId(NI_Vector64_get_Zero); break; } #endif default: unreached(); } } break; } default: break; } return node; } #endif //------------------------------------------------------------------------ // fgOptimizeCommutativeArithmetic: Optimizes commutative operations. // // Arguments: // tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize. // // Return Value: // The optimized tree that can have any shape. // GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree) { assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND)); assert(!tree->gtOverflowEx()); // Commute constants to the right. if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF)) { // TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))". // This may indicate a missed "remorph". Task is to re-enable this assertion and investigate. std::swap(tree->gtOp1, tree->gtOp2); } if (fgOperIsBitwiseRotationRoot(tree->OperGet())) { GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree); if (rotationTree != nullptr) { return rotationTree; } } if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR)) { GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp()); if (castTree != nullptr) { return castTree; } } if (varTypeIsIntegralOrI(tree)) { genTreeOps oldTreeOper = tree->OperGet(); GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp()); if (optimizedTree != nullptr) { if (!optimizedTree->OperIs(oldTreeOper)) { // "optimizedTree" could end up being a COMMA. return optimizedTree; } tree = optimizedTree; } } if (!optValnumCSE_phase) { GenTree* optimizedTree = nullptr; if (tree->OperIs(GT_ADD)) { optimizedTree = fgOptimizeAddition(tree); } else if (tree->OperIs(GT_MUL)) { optimizedTree = fgOptimizeMultiply(tree); } else if (tree->OperIs(GT_AND)) { optimizedTree = fgOptimizeBitwiseAnd(tree); } if (optimizedTree != nullptr) { return optimizedTree; } } return tree; } //------------------------------------------------------------------------ // fgOptimizeAddition: optimizes addition. // // Arguments: // add - the unchecked GT_ADD tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add) { assert(add->OperIs(GT_ADD) && !add->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = add->gtGetOp1(); GenTree* op2 = add->gtGetOp2(); // Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))". // Be careful not to create a byref pointer that may point outside of the ref object. 
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2". if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() && !varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph) { GenTreeOp* addOne = op1->AsOp(); GenTreeOp* addTwo = op2->AsOp(); GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon(); GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon(); addOne->gtOp2 = addTwo->gtGetOp1(); addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2()); DEBUG_DESTROY_NODE(addTwo); constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue()); op2 = constOne; add->gtOp2 = constOne; DEBUG_DESTROY_NODE(constTwo); } // Fold (x + 0) - given it won't change the tree type to TYP_REF. // TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)". if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF))) { if (op2->IsCnsIntOrI() && varTypeIsI(op1)) { fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq); } DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(add); return op1; } // Note that these transformations are legal for floating-point ADDs as well. if (opts.OptimizationEnabled()) { // - a + b = > b - a // ADD((NEG(a), b) => SUB(b, a) // Do not do this if "op2" is constant for canonicalization purposes. if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2)) { add->SetOper(GT_SUB); add->gtOp1 = op2; add->gtOp2 = op1->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op1); return add; } // a + -b = > a - b // ADD(a, (NEG(b)) => SUB(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { add->SetOper(GT_SUB); add->gtOp2 = op2->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op2); return add; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeMultiply: optimizes multiplication. // // Arguments: // mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul)); assert(!mul->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); assert(mul->TypeGet() == genActualType(op1)); assert(mul->TypeGet() == genActualType(op2)); if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl()) { double multiplierValue = op2->AsDblCon()->gtDconVal; if (multiplierValue == 1.0) { // Fold "x * 1.0" to "x". DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Fold "x * 2.0" to "x + x". // If op1 is not a local we will have to introduce a temporary via GT_COMMA. // Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do // this for locals / after hoisting has run (when rationalization remorphs // math INTRINSICSs into calls...). 
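        // For example (an illustrative sketch): for a local "x", "x * 2.0" becomes
        // ADD(LCL_VAR x, LCL_VAR x), trading the multiply for a cheaper add.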
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear))) { op2 = fgMakeMultiUse(&op1); GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2); INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return add; } } if (op2->IsIntegralConst()) { ssize_t mult = op2->AsIntConCommon()->IconValue(); bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq(); assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr); if (mult == 0) { // We may be able to throw away op1 (unless it has side-effects) if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0) { DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(mul); return op2; // Just return the "0" node } // We need to keep op1 for the side-effects. Hang it off a GT_COMMA node. mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN); return mul; } #ifdef TARGET_XARCH // Should we try to replace integer multiplication with lea/add/shift sequences? bool mulShiftOpt = compCodeOpt() != SMALL_CODE; #else // !TARGET_XARCH bool mulShiftOpt = false; #endif // !TARGET_XARCH size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; // is it a power of two? (positive or negative) if (abs_mult == lowestBit) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } // If "op2" is a constant array index, the other multiplicand must be a constant. // Transfer the annotation to the other one. if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(op2->AsIntCon()->gtFieldSeq->m_next == nullptr); GenTree* otherOp = op1; if (otherOp->OperGet() == GT_NEG) { otherOp = otherOp->AsOp()->gtOp1; } assert(otherOp->OperGet() == GT_CNS_INT); assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField()); otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq; } if (abs_mult == 1) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Change the multiplication into a shift by log2(val) bits. op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; if (factor == 3 || factor == 5 || factor == 9) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet()); if (op2IsConstIndex) { factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); } // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon); mul->gtOp1 = op1; fgMorphTreeDone(op1); op2->AsIntConCommon()->SetIconValue(shift); changeToShift = true; } } if (changeToShift) { fgUpdateConstTreeValueNumber(op2); mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN); return mul; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeBitwiseAnd: optimizes the "and" operation. // // Arguments: // andOp - the GT_AND tree to optimize. 
// // Return Value: // The optimized tree, currently always a relop, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp) { assert(andOp->OperIs(GT_AND)); assert(!optValnumCSE_phase); GenTree* op1 = andOp->gtGetOp1(); GenTree* op2 = andOp->gtGetOp2(); // Fold "cmp & 1" to just "cmp". if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(andOp); return op1; } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against // various cast operands and tries to remove them. E.g.: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CNS_INT long // // to: // // * GE_un int // +--* X int // \--* CNS_INT int // // same for: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CAST long <- [u]long <- int // \--* ARR_LEN int // // These patterns quite often show up along with index checks // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // Returns the same tree where operands might have narrower types // // Notes: // TODO-Casts: consider unifying this function with "optNarrowTree" // GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // Caller is expected to call this function only if we have CAST nodes assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)); if (!op1->TypeIs(TYP_LONG)) { // We can extend this logic to handle small types as well, but currently it's done mostly to // assist range check elimination return cmp; } GenTree* castOp; GenTree* knownPositiveOp; bool knownPositiveIsOp2; if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)))) { // op2 is either a LONG constant or (T)ARR_LENGTH knownPositiveIsOp2 = true; castOp = cmp->gtGetOp1(); knownPositiveOp = cmp->gtGetOp2(); } else { // op1 is either a LONG constant (yes, it's pretty normal for relops) // or (T)ARR_LENGTH castOp = cmp->gtGetOp2(); knownPositiveOp = cmp->gtGetOp1(); knownPositiveIsOp2 = false; } if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) && castOp->IsUnsigned() && !castOp->gtOverflow()) { bool knownPositiveFitsIntoU32 = false; if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue())) { // BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX. knownPositiveFitsIntoU32 = true; } else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) && knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)) { knownPositiveFitsIntoU32 = true; // TODO-Casts: recognize Span.Length here as well. 
} if (!knownPositiveFitsIntoU32) { return cmp; } JITDUMP("Removing redundant cast(s) for:\n") DISPTREE(cmp) JITDUMP("\n\nto:\n\n") cmp->SetUnsigned(); // Drop cast from castOp if (knownPositiveIsOp2) { cmp->gtOp1 = castOp->AsCast()->CastOp(); } else { cmp->gtOp2 = castOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(castOp); if (knownPositiveOp->OperIs(GT_CAST)) { // Drop cast from knownPositiveOp too if (knownPositiveIsOp2) { cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp(); } else { cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(knownPositiveOp); } else { // Change type for constant from LONG to INT knownPositiveOp->ChangeType(TYP_INT); #ifndef TARGET_64BIT assert(knownPositiveOp->OperIs(GT_CNS_LNG)); knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue())); #endif fgUpdateConstTreeValueNumber(knownPositiveOp); } DISPTREE(cmp) JITDUMP("\n") } return cmp; } //------------------------------------------------------------------------ // fgPropagateCommaThrow: propagate a "comma throw" up the tree. // // "Comma throws" in the compiler represent the canonical form of an always // throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy // the semantic that the original expression produced some value and are // generated by "gtFoldExprConst" when it encounters checked arithmetic that // will determinably overflow. // // In the global morphing phase, "comma throws" are "propagated" up the tree, // in post-order, to eliminate nodes that will never execute. This method, // called by "fgMorphSmpOp", encapsulates this optimization. // // Arguments: // parent - the node currently being processed. // commaThrow - the comma throw in question, "parent"'s operand. // precedingSideEffects - side effects of nodes preceding "comma" in execution order. // // Return Value: // If "parent" is to be replaced with a comma throw, i. e. the propagation was successful, // the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception: // the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not // have to be a "comma throw", it can be "bare" throw call if the "parent" node did not // produce any value. // // Notes: // "Comma throws" are very rare. // GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects) { // Comma throw propagation does not preserve VNs, and deletes nodes. assert(fgGlobalMorph); assert(fgIsCommaThrow(commaThrow)); if ((commaThrow->gtFlags & GTF_COLON_COND) == 0) { fgRemoveRestOfBlock = true; } if ((precedingSideEffects & GTF_ALL_EFFECT) == 0) { if (parent->TypeIs(TYP_VOID)) { // Return the throw node as the new tree. return commaThrow->gtGetOp1(); } // Fix up the COMMA's type if needed. if (genActualType(parent) != genActualType(commaThrow)) { commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent)); commaThrow->ChangeType(genActualType(parent)); } return commaThrow; } return nullptr; } //---------------------------------------------------------------------------------------------- // fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree. // // Arguments: // node - The return node that uses an indirection. // // Return Value: // the original op1 of the ret if there was no optimization or an optimized new op1. 
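//
// Notes:
//    For example (an illustrative sketch):
//      RETURN(IND(ADDR(LCL_VAR V01))) => RETURN(LCL_VAR V01)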
// GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ)); GenTreeIndir* ind = ret->gtGetOp1()->AsIndir(); GenTree* addr = ind->Addr(); if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR)) { // If struct promotion was undone, adjust the annotations if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr)) { return ind; } // If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that // LclVar. // Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))). GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar(); if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum())) { assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind)); unsigned indSize; if (ind->OperIs(GT_IND)) { indSize = genTypeSize(ind); } else { indSize = ind->AsBlk()->GetLayout()->GetSize(); } LclVarDsc* varDsc = lvaGetDesc(lclVar); unsigned lclVarSize; if (!lclVar->TypeIs(TYP_STRUCT)) { lclVarSize = genTypeSize(varDsc->TypeGet()); } else { lclVarSize = varDsc->lvExactSize; } // TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST // int<-SIMD16` etc. assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister); #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for // gtNewTempAssign`. if (canFold && (genReturnBB == nullptr)) { // Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it. // Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken // and enregister it. DEBUG_DESTROY_NODE(ind); DEBUG_DESTROY_NODE(addr); ret->gtOp1 = lclVar; // We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can // get rid of it now since the GT_RETURN node should never have // its address taken. assert((ret->gtFlags & GTF_DONT_CSE) == 0); lclVar->gtFlags &= ~GTF_DONT_CSE; return lclVar; } else if (!varDsc->lvDoNotEnregister) { lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } } } return ind; } #ifdef _PREFAST_ #pragma warning(pop) #endif GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { genTreeOps oper = tree->gtOper; GenTree* op1 = tree->gtOp1; GenTree* op2 = tree->gtOp2; var_types typ = tree->TypeGet(); if (fgGlobalMorph && GenTree::OperIsCommutative(oper)) { /* Swap the operands so that the more expensive one is 'op1' */ if (tree->gtFlags & GTF_REVERSE_OPS) { tree->gtOp1 = op2; tree->gtOp2 = op1; op2 = op1; op1 = tree->gtOp1; tree->gtFlags &= ~GTF_REVERSE_OPS; } if (oper == op2->gtOper) { /* Reorder nested operators at the same precedence level to be left-recursive. For example, change "(a+(b+c))" to the equivalent expression "((a+b)+c)". 
*/ /* Things are handled differently for floating-point operators */ if (!varTypeIsFloating(tree->TypeGet())) { fgMoveOpsLeft(tree); op1 = tree->gtOp1; op2 = tree->gtOp2; } } } #if REARRANGE_ADDS /* Change "((x+icon)+y)" to "((x+y)+icon)" Don't reorder floating-point operations */ if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() && varTypeIsIntegralOrI(typ)) { GenTree* ad1 = op1->AsOp()->gtOp1; GenTree* ad2 = op1->AsOp()->gtOp2; if (!op2->OperIsConst() && ad2->OperIsConst()) { // This takes // + (tree) // / \. // / \. // / \. // + (op1) op2 // / \. // / \. // ad1 ad2 // // and it swaps ad2 and op2. // Don't create a byref pointer that may point outside of the ref object. // If a GC happens, the byref won't get updated. This can happen if one // of the int components is negative. It also requires the address generation // be in a fully-interruptible code region. if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet())) { tree->gtOp2 = ad2; op1->AsOp()->gtOp2 = op2; op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; op2 = tree->gtOp2; } } } #endif /*------------------------------------------------------------------------- * Perform optional oper-specific postorder morphing */ switch (oper) { case GT_ASG: // Make sure we're allowed to do this. if (optValnumCSE_phase) { // It is not safe to reorder/delete CSE's break; } if (varTypeIsStruct(typ) && !tree->IsPhiDefn()) { if (tree->OperIsCopyBlkOp()) { return fgMorphCopyBlock(tree); } else { return fgMorphInitBlock(tree); } } if (typ == TYP_LONG) { break; } if (op2->gtFlags & GTF_ASG) { break; } if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT)) { break; } /* Special case: a cast that can be thrown away */ // TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only // one cast and sometimes there is another one after it that gets removed by this // code. fgMorphSmp should be improved to remove all redundant casts so this code // can be removed. 
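            // For example (an illustrative sketch): in ASG(IND short, CAST(int <- short)),
            // the cast adds no precision that the store would keep, so the CAST is
            // dropped and the short-typed operand is stored directly.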
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow()) { var_types srct; var_types cast; var_types dstt; srct = op2->AsCast()->CastOp()->TypeGet(); cast = (var_types)op2->CastToType(); dstt = op1->TypeGet(); /* Make sure these are all ints and precision is not lost */ if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT) { op2 = tree->gtOp2 = op2->AsCast()->CastOp(); } } break; case GT_MUL: /* Check for the case "(val + icon) * icon" */ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD) { GenTree* add = op1->AsOp()->gtOp2; if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0)) { if (tree->gtOverflow() || op1->gtOverflow()) { break; } ssize_t imul = op2->AsIntCon()->gtIconVal; ssize_t iadd = add->AsIntCon()->gtIconVal; /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */ oper = GT_ADD; tree->ChangeOper(oper); op2->AsIntCon()->SetValueTruncating(iadd * imul); op1->ChangeOper(GT_MUL); add->AsIntCon()->SetIconValue(imul); } } break; case GT_DIV: /* For "val / 1", just return "val" */ if (op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(tree); return op1; } break; case GT_UDIV: case GT_UMOD: tree->CheckDivideByConstOptimized(this); break; case GT_LSH: /* Check for the case "(val + icon) << icon" */ if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow()) { GenTree* cns = op1->AsOp()->gtOp2; if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0)) { ssize_t ishf = op2->AsIntConCommon()->IconValue(); ssize_t iadd = cns->AsIntConCommon()->IconValue(); // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n"); /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */ tree->ChangeOper(GT_ADD); // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; op2->AsIntConCommon()->SetValueTruncating(iadd << ishf); if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr && cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(cns->AsIntCon()->gtFieldSeq->m_next == nullptr); op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq; } op1->ChangeOper(GT_LSH); cns->AsIntConCommon()->SetIconValue(ishf); } } break; case GT_XOR: if (!optValnumCSE_phase) { /* "x ^ -1" is "~x" */ if (op2->IsIntegralConst(-1)) { tree->ChangeOper(GT_NOT); tree->gtOp2 = nullptr; DEBUG_DESTROY_NODE(op2); } else if (op2->IsIntegralConst(1) && op1->OperIsCompare()) { /* "binaryVal ^ 1" is "!binaryVal" */ gtReverseCond(op1); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); return op1; } } break; case GT_INIT_VAL: // Initialization values for initBlk have special semantics - their lower // byte is used to fill the struct. However, we allow 0 as a "bare" value, // which enables them to get a VNForZero, and be propagated. if (op1->IsIntegralConst(0)) { return op1; } break; default: break; } return tree; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree. // // Arguments: // multiOp - The tree to morph // // Return Value: // The fully morphed tree. 
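//
// Notes:
//    Operands are morphed in place and effect flags are re-propagated. For example
//    (an illustrative sketch), on xarch XOR(v, Vector128<float>.Zero) folds to "v".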
// GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) { gtUpdateNodeOperSideEffects(multiOp); bool dontCseConstArguments = false; #if defined(FEATURE_HW_INTRINSICS) // Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments if (multiOp->OperIs(GT_HWINTRINSIC)) { NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId(); #if defined(TARGET_XARCH) if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM) { dontCseConstArguments = true; } #elif defined(TARGET_ARMARCH) if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic)) { dontCseConstArguments = true; } #endif } #endif for (GenTree** use : multiOp->UseEdges()) { *use = fgMorphTree(*use); GenTree* operand = *use; multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT); if (dontCseConstArguments && operand->OperIsConst()) { operand->SetDoNotCSE(); } // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. // if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted) { lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum() DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep)); } } #if defined(FEATURE_HW_INTRINSICS) if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC)) { GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic(); switch (hw->GetHWIntrinsicId()) { #if defined(TARGET_XARCH) case NI_SSE_Xor: case NI_SSE2_Xor: case NI_AVX_Xor: case NI_AVX2_Xor: { // Transform XOR(X, 0) to X for vectors GenTree* op1 = hw->Op(1); GenTree* op2 = hw->Op(2); if (!gtIsActiveCSE_Candidate(hw)) { if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op1); return op2; } if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op2); return op1; } } break; } #endif case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARMARCH) case NI_Vector64_Create: #endif { bool hwAllArgsAreConst = true; for (GenTree** use : multiOp->UseEdges()) { if (!(*use)->OperIsConst()) { hwAllArgsAreConst = false; break; } } // Avoid unexpected CSE for constant arguments for Vector_.Create // but only if all arguments are constants. if (hwAllArgsAreConst) { for (GenTree** use : multiOp->UseEdges()) { (*use)->SetDoNotCSE(); } } } break; default: break; } } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) #ifdef FEATURE_HW_INTRINSICS if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase) { return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic()); } #endif return multiOp; } #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b // (see ECMA III 3.55 and III.3.56). // // Arguments: // tree - The GT_MOD/GT_UMOD tree to morph // // Returns: // The morphed tree // // Notes: // For ARM64 we don't have a remainder instruction so this transform is // always done. For XARCH this transform is done if we know that magic // division will be used, in that case this transform allows CSE to // eliminate the redundant div from code like "x = a / 3; y = a % 3;". 
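//
//    For example (an illustrative sketch), "a % b" becomes:
//
//        SUB
//        /  \.
//       a    MUL
//           /   \.
//         DIV    b
//        /   \.
//       a     b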
// GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree) { JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree)); if (tree->OperGet() == GT_MOD) { tree->SetOper(GT_DIV); } else if (tree->OperGet() == GT_UMOD) { tree->SetOper(GT_UDIV); } else { noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv"); } var_types type = tree->gtType; GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1); GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2); GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue); GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul); // Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul". // sub->gtFlags |= GTF_REVERSE_OPS; #ifdef DEBUG sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif tree->CheckDivideByConstOptimized(this); return sub; } //------------------------------------------------------------------------------ // fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree. // // // Arguments: // oper - Operation to check // // Return Value: // True if the operation can be a root of a bitwise rotation tree; false otherwise. bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper) { return (oper == GT_OR) || (oper == GT_XOR); } //------------------------------------------------------------------------------ // fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return // an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree. // // Arguments: // tree - tree to check for a rotation pattern // // Return Value: // An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise. // // Assumption: // The input is a GT_OR or a GT_XOR tree. GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) { // // Check for a rotation pattern, e.g., // // OR ROL // / \ / \. // LSH RSZ -> x y // / \ / \. // x AND x AND // / \ / \. // y 31 ADD 31 // / \. // NEG 32 // | // y // The patterns recognized: // (x << (y & M)) op (x >>> ((-y + N) & M)) // (x >>> ((-y + N) & M)) op (x << (y & M)) // // (x << y) op (x >>> (-y + N)) // (x >> > (-y + N)) op (x << y) // // (x >>> (y & M)) op (x << ((-y + N) & M)) // (x << ((-y + N) & M)) op (x >>> (y & M)) // // (x >>> y) op (x << (-y + N)) // (x << (-y + N)) op (x >>> y) // // (x << c1) op (x >>> c2) // (x >>> c1) op (x << c2) // // where // c1 and c2 are const // c1 + c2 == bitsize(x) // N == bitsize(x) // M is const // M & (N - 1) == N - 1 // op is either | or ^ if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)) { // We can't do anything if the tree has assignments, calls, or volatile // reads. Note that we allow GTF_EXCEPT side effect since any exceptions // thrown by the original tree will be thrown by the transformed tree as well. return nullptr; } genTreeOps oper = tree->OperGet(); assert(fgOperIsBitwiseRotationRoot(oper)); // Check if we have an LSH on one side of the OR and an RSZ on the other side. 
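    // For example (an illustrative sketch): with a 32 bit "x",
    //   (x << y) | (x >>> (-y + 32))  =>  ROL(x, y)
    // and, for the constant form, (x << 3) | (x >>> 29) => ROL(x, 3).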
GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); GenTree* leftShiftTree = nullptr; GenTree* rightShiftTree = nullptr; if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ)) { leftShiftTree = op1; rightShiftTree = op2; } else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH)) { leftShiftTree = op2; rightShiftTree = op1; } else { return nullptr; } // Check if the trees representing the value to shift are identical. // We already checked that there are no side effects above. if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1())) { GenTree* rotatedValue = leftShiftTree->gtGetOp1(); var_types rotatedValueActualType = genActualType(rotatedValue->gtType); ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8; noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64)); GenTree* leftShiftIndex = leftShiftTree->gtGetOp2(); GenTree* rightShiftIndex = rightShiftTree->gtGetOp2(); // The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits // shouldn't be masked for the transformation to be valid. If additional // higher bits are not masked, the transformation is still valid since the result // of MSIL shift instructions is unspecified if the shift amount is greater or equal // than the width of the value being shifted. ssize_t minimalMask = rotatedValueBitSize - 1; ssize_t leftShiftMask = -1; ssize_t rightShiftMask = -1; if ((leftShiftIndex->OperGet() == GT_AND)) { if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI()) { leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; leftShiftIndex = leftShiftIndex->gtGetOp1(); } else { return nullptr; } } if ((rightShiftIndex->OperGet() == GT_AND)) { if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI()) { rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; rightShiftIndex = rightShiftIndex->gtGetOp1(); } else { return nullptr; } } if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask)) { // The shift index is overmasked, e.g., we have // something like (x << y & 15) or // (x >> (32 - y) & 15 with 32 bit x. // The transformation is not valid. return nullptr; } GenTree* shiftIndexWithAdd = nullptr; GenTree* shiftIndexWithoutAdd = nullptr; genTreeOps rotateOp = GT_NONE; GenTree* rotateIndex = nullptr; if (leftShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = leftShiftIndex; shiftIndexWithoutAdd = rightShiftIndex; rotateOp = GT_ROR; } else if (rightShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = rightShiftIndex; shiftIndexWithoutAdd = leftShiftIndex; rotateOp = GT_ROL; } if (shiftIndexWithAdd != nullptr) { if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI()) { if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize) { if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG) { if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd)) { // We found one of these patterns: // (x << (y & M)) | (x >>> ((-y + N) & M)) // (x << y) | (x >>> (-y + N)) // (x >>> (y & M)) | (x << ((-y + N) & M)) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. // GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need // to add helpers for GT_ROL and GT_ROR. 
                                return nullptr;
                            }
#endif

                            rotateIndex = shiftIndexWithoutAdd;
                        }
                    }
                }
            }
        }
        else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
        {
            if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
            {
                // We found this pattern:
                // (x << c1) | (x >>> c2)
                // where c1 and c2 are const and c1 + c2 == bitsize(x)
                rotateOp    = GT_ROL;
                rotateIndex = leftShiftIndex;
            }
        }

        if (rotateIndex != nullptr)
        {
            noway_assert(GenTree::OperIsRotate(rotateOp));

            GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT;

            // We can use the same tree only during global morph; reusing the tree in a later morph
            // may invalidate value numbers.
            if (fgGlobalMorph)
            {
                tree->AsOp()->gtOp1 = rotatedValue;
                tree->AsOp()->gtOp2 = rotateIndex;
                tree->ChangeOper(rotateOp);

                unsigned childFlags = 0;
                for (GenTree* op : tree->Operands())
                {
                    childFlags |= (op->gtFlags & GTF_ALL_EFFECT);
                }

                // The parent's flags should be a superset of its operands' flags
                noway_assert((inputTreeEffects & childFlags) == childFlags);
            }
            else
            {
                tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex);
                noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT));
            }

            return tree;
        }
    }

    return nullptr;
}

#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------------
// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
//
// Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap
// operands if the first one is a constant and the second one is not, even for trees which
// end up not being eligible for long multiplication.
//
// Arguments:
//    mul - GT_MUL tree to check for a long multiplication opportunity
//
// Return Value:
//    The original tree, with operands possibly swapped, if it is not eligible for long multiplication.
//    Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
//
GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
{
    assert(mul->OperIs(GT_MUL));
    assert(mul->TypeIs(TYP_LONG));

    GenTree* op1 = mul->gtGetOp1();
    GenTree* op2 = mul->gtGetOp2();

    // "IsValidLongMul" and decomposition do not handle constant op1.
    if (op1->IsIntegralConst())
    {
        std::swap(op1, op2);
        mul->gtOp1 = op1;
        mul->gtOp2 = op2;
    }

    if (!mul->IsValidLongMul())
    {
        return mul;
    }

    // MUL_LONG needs to do the work the casts would have done.
    mul->ClearUnsigned();
    if (op1->IsUnsigned())
    {
        mul->SetUnsigned();
    }

    // "IsValidLongMul" returned "true", so this GT_MUL cannot overflow.
    mul->ClearOverflow();
    mul->Set64RsltMul();

    return fgMorphLongMul(mul);
}

//------------------------------------------------------------------------------
// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
//
// Morphs *only* the operands of casts that compose the long mul to
// avoid them being folded away.
//
// Arguments:
//    mul - GT_MUL tree to morph operands of
//
// Return Value:
//    The original tree, with operands morphed and flags propagated.
//
GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
{
    INDEBUG(mul->DebugCheckLongMul());

    GenTree* op1 = mul->gtGetOp1();
    GenTree* op2 = mul->gtGetOp2();

    // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
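    // For example (an illustrative sketch), a decomposable long multiply has the shape
    // MUL long [GTF_MUL_64RSLT](CAST(long <- int a), CAST(long <- int b)); only "a" and
    // "b" are morphed here, the casts themselves must survive.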
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp()); op1->SetAllEffectsFlags(op1->AsCast()->CastOp()); if (op2->OperIs(GT_CAST)) { op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp()); op2->SetAllEffectsFlags(op2->AsCast()->CastOp()); } mul->SetAllEffectsFlags(op1, op2); op1->SetDoNotCSE(); op2->SetDoNotCSE(); return mul; } #endif // !defined(TARGET_64BIT) /***************************************************************************** * * Transform the given tree for code generation and return an equivalent tree. */ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) { assert(tree); #ifdef DEBUG if (verbose) { if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID) { noway_assert(!"JitBreakMorphTree hit"); } } #endif #ifdef DEBUG int thisMorphNum = 0; if (verbose && treesBeforeAfterMorph) { thisMorphNum = morphNum++; printf("\nfgMorphTree (before %d):\n", thisMorphNum); gtDispTree(tree); } #endif if (fgGlobalMorph) { // Apply any rewrites for implicit byref arguments before morphing the // tree. if (fgMorphImplicitByRefArgs(tree)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum); gtDispTree(tree); } #endif } } /*------------------------------------------------------------------------- * fgMorphTree() can potentially replace a tree with another, and the * caller has to store the return value correctly. * Turn this on to always make copy of "tree" here to shake out * hidden/unupdated references. */ #ifdef DEBUG if (compStressCompile(STRESS_GENERIC_CHECK, 0)) { GenTree* copy; if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL) { copy = gtNewLargeOperNode(GT_ADD, TYP_INT); } else { copy = new (this, GT_CALL) GenTreeCall(TYP_INT); } copy->ReplaceWith(tree, this); #if defined(LATE_DISASM) // GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle()) { copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle; } #endif DEBUG_DESTROY_NODE(tree); tree = copy; } #endif // DEBUG if (fgGlobalMorph) { /* Ensure that we haven't morphed this node already */ assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); /* Before morphing the tree, we try to propagate any active assertions */ if (optLocalAssertionProp) { /* Do we have any active assertions? */ if (optAssertionCount > 0) { GenTree* newTree = tree; while (newTree != nullptr) { tree = newTree; /* newTree is non-Null if we propagated an assertion */ newTree = optAssertionProp(apFull, tree, nullptr, nullptr); } assert(tree != nullptr); } } PREFAST_ASSUME(tree != nullptr); } /* Save the original un-morphed tree for fgMorphTreeDone */ GenTree* oldTree = tree; /* Figure out what kind of a node we have */ unsigned kind = tree->OperKind(); /* Is this a constant node? */ if (tree->OperIsConst()) { tree = fgMorphConst(tree); goto DONE; } /* Is this a leaf node? */ if (kind & GTK_LEAF) { tree = fgMorphLeaf(tree); goto DONE; } /* Is it a 'simple' unary/binary operator? 
*/ if (kind & GTK_SMPOP) { tree = fgMorphSmpOp(tree, mac); goto DONE; } /* See what kind of a special operator we have here */ switch (tree->OperGet()) { case GT_CALL: if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree = fgMorphCall(tree->AsCall()); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif tree = fgMorphMultiOp(tree->AsMultiOp()); break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) case GT_ARR_ELEM: tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj); unsigned dim; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]); } tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; } if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_ARR_OFFSET: tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset); tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex); tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj); tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT; if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_PHI: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreePhi::Use& use : tree->AsPhi()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT; } break; case GT_FIELD_LIST: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } break; case GT_CMPXCHG: tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation); tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue); tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand); tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL); tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT; break; case GT_STORE_DYN_BLK: tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk()); break; default: #ifdef DEBUG gtDispTree(tree); #endif noway_assert(!"unexpected operator"); } DONE: fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum)); return tree; } //------------------------------------------------------------------------ // fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. 
// void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)) { /* All dependent assertions are killed here */ ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum)); if (killed) { AssertionIndex index = optAssertionCount; while (killed && (index > 0)) { if (BitVecOps::IsMember(apTraits, killed, index - 1)) { #ifdef DEBUG AssertionDsc* curAssertion = optGetAssertion(index); noway_assert((curAssertion->op1.lcl.lclNum == lclNum) || ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { printf("\nThe assignment "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion); } #endif // Remove this bit from the killed mask BitVecOps::RemoveElemD(apTraits, killed, index - 1); optAssertionRemove(index); } index--; } // killed mask should now be zero noway_assert(BitVecOps::IsEmpty(apTraits, killed)); } } //------------------------------------------------------------------------ // fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum. // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. // // Notes: // For structs and struct fields, it will invalidate the children and parent // respectively. // Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar. // void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); // Kill the field locals. for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { fgKillDependentAssertionsSingle(i DEBUGARG(tree)); } // Kill the struct local itself. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } else if (varDsc->lvIsStructField) { // Kill the field local. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); // Kill the parent struct. fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree)); } else { fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } } /***************************************************************************** * * This function is called to complete the morphing of a tree node * It should only be called once for each node. * If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated, * to enforce the invariant that each node is only morphed once. * If local assertion prop is enabled the result tree may be replaced * by an equivalent tree. * */ void Compiler::fgMorphTreeDone(GenTree* tree, GenTree* oldTree /* == NULL */ DEBUGARG(int morphNum)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (after %d):\n", morphNum); gtDispTree(tree); printf(""); // in our logic this causes a flush } #endif if (!fgGlobalMorph) { return; } if ((oldTree != nullptr) && (oldTree != tree)) { /* Ensure that we have morphed this node */ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!"); #ifdef DEBUG TransferTestDataToNode(oldTree, tree); #endif } else { // Ensure that we haven't morphed this node already assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); } if (tree->OperIsConst()) { goto DONE; } if (!optLocalAssertionProp) { goto DONE; } /* Do we have any active assertions? 
*/

    if (optAssertionCount > 0)
    {
        /* Is this an assignment to a local variable */
        GenTreeLclVarCommon* lclVarTree = nullptr;

        // The check below will miss LIR-style assignments.
        //
        // But we shouldn't be running local assertion prop on these,
        // as local prop gets disabled when we run global prop.
        assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));

        // DefinesLocal can return true for some BLK op uses, so
        // check what gets assigned only when we're at an assignment.
        if (tree->OperIs(GT_ASG) && tree->DefinesLocal(this, &lclVarTree))
        {
            unsigned lclNum = lclVarTree->GetLclNum();
            noway_assert(lclNum < lvaCount);
            fgKillDependentAssertions(lclNum DEBUGARG(tree));
        }
    }

    /* If this tree makes a new assertion - make it available */
    optAssertionGen(tree);

DONE:;

#ifdef DEBUG
    /* Mark this node as being morphed */
    tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}

//------------------------------------------------------------------------
// fgFoldConditional: try and fold conditionals and optimize BBJ_COND or
//   BBJ_SWITCH blocks.
//
// Arguments:
//   block - block to examine
//
// Returns:
//   FoldResult indicating what changes were made, if any
//
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
    FoldResult result = FoldResult::FOLD_DID_NOTHING;

    // We don't want to make any code unreachable
    //
    if (opts.OptimizationDisabled())
    {
        return result;
    }

    if (block->bbJumpKind == BBJ_COND)
    {
        noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);

        Statement* lastStmt = block->lastStmt();

        noway_assert(lastStmt->GetNextStmt() == nullptr);

        if (lastStmt->GetRootNode()->gtOper == GT_CALL)
        {
            noway_assert(fgRemoveRestOfBlock);

            // Unconditional throw - transform the basic block into a BBJ_THROW
            //
            fgConvertBBToThrowBB(block);
            result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
            JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
            JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
            return result;
        }

        noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);

        /* Did we fold the conditional */

        noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
        GenTree* condTree;
        condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
        GenTree* cond;
        cond = condTree->gtEffectiveVal(true);

        if (cond->OperIsConst())
        {
            /* Yupee - we folded the conditional!
             * Remove the conditional statement */

            noway_assert(cond->gtOper == GT_CNS_INT);
            noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));

            if (condTree != cond)
            {
                // Preserve any side effects
                assert(condTree->OperIs(GT_COMMA));
                lastStmt->SetRootNode(condTree);
                result = FoldResult::FOLD_ALTERED_LAST_STMT;
            }
            else
            {
                // no side effects, remove the jump entirely
                fgRemoveStmt(block, lastStmt);
                result = FoldResult::FOLD_REMOVED_LAST_STMT;
            }

            // block is a BBJ_COND that we are folding the conditional for.
            // bTaken is the path that will always be taken from block.
            // bNotTaken is the path that will never be taken from block.
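            // For example: folding "JTRUE(1)" makes bbJumpDest the bTaken successor
            // and bbNext the bNotTaken one; folding "JTRUE(0)" swaps those roles.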
//
            BasicBlock* bTaken;
            BasicBlock* bNotTaken;

            if (cond->AsIntCon()->gtIconVal != 0)
            {
                /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
                block->bbJumpKind = BBJ_ALWAYS;
                bTaken            = block->bbJumpDest;
                bNotTaken         = block->bbNext;
            }
            else
            {
                /* Unmark the loop if we are removing a backwards branch */
                /* dest block must also be marked as a loop head and     */
                /* We must be able to reach the backedge block           */
                if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
                    fgReachable(block->bbJumpDest, block))
                {
                    optUnmarkLoopBlocks(block->bbJumpDest, block);
                }

                /* JTRUE 0 - transform the basic block into a BBJ_NONE */
                block->bbJumpKind = BBJ_NONE;
                bTaken            = block->bbNext;
                bNotTaken         = block->bbJumpDest;
            }

            if (fgHaveValidEdgeWeights)
            {
                // We are removing an edge from block to bNotTaken
                // and we have already computed the edge weights, so
                // we will try to adjust some of the weights
                //
                flowList*   edgeTaken = fgGetPredForBlock(bTaken, block);
                BasicBlock* bUpdated  = nullptr; // non-NULL if we updated the weight of an internal block

                // We examine the taken edge (block -> bTaken)
                // if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
                // else if bTaken has valid profile weight and block does not we try to adjust block's weight
                // We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken
                //
                if (block->hasProfileWeight())
                {
                    // The edge weights for (block -> bTaken) are 100% of block's weight
                    edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken);

                    if (!bTaken->hasProfileWeight())
                    {
                        if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight))
                        {
                            // Update the weight of bTaken
                            bTaken->inheritWeight(block);
                            bUpdated = bTaken;
                        }
                    }
                }
                else if (bTaken->hasProfileWeight())
                {
                    if (bTaken->countOfInEdges() == 1)
                    {
                        // There is only one in edge to bTaken
                        edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken);

                        // Update the weight of block
                        block->inheritWeight(bTaken);
                        bUpdated = block;
                    }
                }

                if (bUpdated != nullptr)
                {
                    weight_t newMinWeight;
                    weight_t newMaxWeight;

                    flowList* edge;
                    // Now fix the weights of the edges out of 'bUpdated'
                    switch (bUpdated->bbJumpKind)
                    {
                        case BBJ_NONE:
                            edge         = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
                            newMaxWeight = bUpdated->bbWeight;
                            newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
                            break;

                        case BBJ_COND:
                            edge         = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
                            newMaxWeight = bUpdated->bbWeight;
                            newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
                            FALLTHROUGH;

                        case BBJ_ALWAYS:
                            edge         = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
                            newMaxWeight = bUpdated->bbWeight;
                            newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbJumpDest);
                            break;

                        default:
                            // We don't handle BBJ_SWITCH
                            break;
                    }
                }
            }

            /* modify the flow graph */

            /* Remove 'block' from the predecessor list of 'bNotTaken' */
            fgRemoveRefPred(bNotTaken, block);

#ifdef DEBUG
            if (verbose)
            {
                printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
                printf(FMT_BB " becomes a %s", block->bbNum,
                       block->bbJumpKind == BBJ_ALWAYS ?
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif /* if the block was a loop condition we may have to modify * the loop table */ for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* We are only interested in the loop bottom */ if (optLoopTable[loopNum].lpBottom == block) { if (cond->AsIntCon()->gtIconVal == 0) { /* This was a bogus loop (condition always false) * Remove the loop from the table */ optMarkLoopRemoved(loopNum); optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop")); #ifdef DEBUG if (verbose) { printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum, optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum); } #endif } } } } } else if (block->bbJumpKind == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the switch entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } /* modify the flow graph */ /* Find the actual jump target */ unsigned switchVal; switchVal = (unsigned)cond->AsIntCon()->gtIconVal; unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; bool foundVal; foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { BasicBlock* curJump = *jumpTab; assert(curJump->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { if (curJump != block->bbNext) { /* transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = curJump; } else { /* transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; } foundVal = true; } else { /* Remove 'block' from the predecessor list of 'curJump' */ fgRemoveRefPred(curJump, block); } } assert(foundVal); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif } } return result; } //------------------------------------------------------------------------ // fgMorphBlockStmt: morph a single statement in a block. // // Arguments: // block - block containing the statement // stmt - statement to morph // msg - string to identify caller in a dump // // Returns: // true if 'stmt' was removed from the block. // s false if 'stmt' is still in the block (even if other statements were removed). // // Notes: // Can be called anytime, unlike fgMorphStmts() which should only be called once. // bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)) { assert(block != nullptr); assert(stmt != nullptr); // Reset some ambient state fgRemoveRestOfBlock = false; compCurBB = block; compCurStmt = stmt; GenTree* morph = fgMorphTree(stmt->GetRootNode()); // Bug 1106830 - During the CSE phase we can't just remove // morph->AsOp()->gtOp2 as it could contain CSE expressions. // This leads to a noway_assert in OptCSE.cpp when // searching for the removed CSE ref. (using gtFindLink) // if (!optValnumCSE_phase) { // Check for morph as a GT_COMMA with an unconditional throw if (fgIsCommaThrow(morph, true)) { #ifdef DEBUG if (verbose) { printf("Folding a top-level fgIsCommaThrow stmt\n"); printf("Removing op2 as unreachable:\n"); gtDispTree(morph->AsOp()->gtOp2); printf("\n"); } #endif // Use the call as the new stmt morph = morph->AsOp()->gtOp1; noway_assert(morph->gtOper == GT_CALL); } // we can get a throw as a statement root if (fgIsThrow(morph)) { #ifdef DEBUG if (verbose) { printf("We have a top-level fgIsThrow stmt\n"); printf("Removing the rest of block as unreachable:\n"); } #endif noway_assert((morph->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } } stmt->SetRootNode(morph); // Can the entire tree be removed? bool removedStmt = false; // Defer removing statements during CSE so we don't inadvertently remove any CSE defs. if (!optValnumCSE_phase) { removedStmt = fgCheckRemoveStmt(block, stmt); } // Or this is the last statement of a conditional branch that was just folded? if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock) { FoldResult const fr = fgFoldConditional(block); removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT); } if (!removedStmt) { // Have to re-do the evaluation order since for example some later code does not expect constants as op1 gtSetStmtInfo(stmt); // Have to re-link the nodes for this statement fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed")); gtDispTree(morph); printf("\n"); } #endif if (fgRemoveRestOfBlock) { // Remove the rest of the stmts in the block for (Statement* removeStmt : StatementList(stmt->GetNextStmt())) { fgRemoveStmt(block, removeStmt); } // The rest of block has been removed and we will always throw an exception. // // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. // We should not convert it to a ThrowBB. 
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { // Convert block to a throw bb fgConvertBBToThrowBB(block); } #ifdef DEBUG if (verbose) { printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum); } #endif fgRemoveRestOfBlock = false; } return removedStmt; } /***************************************************************************** * * Morph the statements of the given block. * This function should be called just once for a block. Use fgMorphBlockStmt() * for reentrant calls. */ void Compiler::fgMorphStmts(BasicBlock* block) { fgRemoveRestOfBlock = false; fgCurrentlyInUseArgTemps = hashBv::Create(this); for (Statement* const stmt : block->Statements()) { if (fgRemoveRestOfBlock) { fgRemoveStmt(block, stmt); continue; } #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT && stmt->GetRootNode()->OperGet() == GT_ASG) { fgMorphCombineSIMDFieldAssignments(block, stmt); } #endif fgMorphStmt = stmt; compCurStmt = stmt; GenTree* oldTree = stmt->GetRootNode(); #ifdef DEBUG unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0); if (verbose) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID()); gtDispTree(oldTree); } #endif /* Morph this statement tree */ GenTree* morphedTree = fgMorphTree(oldTree); // mark any outgoing arg temps as free so we can reuse them in the next statement. fgCurrentlyInUseArgTemps->ZeroAll(); // Has fgMorphStmt been sneakily changed ? if ((stmt->GetRootNode() != oldTree) || (block != compCurBB)) { if (stmt->GetRootNode() != oldTree) { /* This must be tailcall. Ignore 'morphedTree' and carry on with the tail-call node */ morphedTree = stmt->GetRootNode(); } else { /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif } noway_assert(compTailCallUsed); noway_assert(morphedTree->gtOper == GT_CALL); GenTreeCall* call = morphedTree->AsCall(); // Could be // - a fast call made as jmp in which case block will be ending with // BBJ_RETURN (as we need epilog) and marked as containing a jmp. // - a tailcall dispatched via JIT helper, on x86, in which case // block will be ending with BBJ_THROW. // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); } #ifdef DEBUG if (compStressCompile(STRESS_CLONE_EXPR, 30)) { // Clone all the trees to stress gtCloneExpr() if (verbose) { printf("\nfgMorphTree (stressClone from):\n"); gtDispTree(morphedTree); } morphedTree = gtCloneExpr(morphedTree); noway_assert(morphedTree != nullptr); if (verbose) { printf("\nfgMorphTree (stressClone to):\n"); gtDispTree(morphedTree); } } /* If the hash value changes. 
we modified the tree during morphing */ if (verbose) { unsigned newHash = gtHashValue(morphedTree); if (newHash != oldHash) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID()); gtDispTree(morphedTree); } } #endif /* Check for morphedTree as a GT_COMMA with an unconditional throw */ if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true)) { /* Use the call as the new stmt */ morphedTree = morphedTree->AsOp()->gtOp1; noway_assert(morphedTree->gtOper == GT_CALL); noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } stmt->SetRootNode(morphedTree); if (fgRemoveRestOfBlock) { continue; } /* Has the statement been optimized away */ if (fgCheckRemoveStmt(block, stmt)) { continue; } /* Check if this block ends with a conditional branch that can be folded */ if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING) { continue; } if (ehBlockHasExnFlowDsc(block)) { continue; } } if (fgRemoveRestOfBlock) { if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; if (op1->OperIsCompare()) { /* Unmark the comparison node with GTF_RELOP_JMP_USED */ op1->gtFlags &= ~GTF_RELOP_JMP_USED; } lastStmt->SetRootNode(fgMorphTree(op1)); } } /* Mark block as a BBJ_THROW block */ fgConvertBBToThrowBB(block); } #if FEATURE_FASTTAILCALL GenTree* recursiveTailCall = nullptr; if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall)) { fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall()); } #endif // Reset this back so that it doesn't leak out impacting other blocks fgRemoveRestOfBlock = false; } /***************************************************************************** * * Morph the blocks of the method. * Returns true if the basic block list is modified. * This function should be called just once. */ void Compiler::fgMorphBlocks() { #ifdef DEBUG if (verbose) { printf("\n*************** In fgMorphBlocks()\n"); } #endif /* Since fgMorphTree can be called after various optimizations to re-arrange * the nodes we need a global flag to signal if we are during the one-pass * global morphing */ fgGlobalMorph = true; // // Local assertion prop is enabled if we are optimized // optLocalAssertionProp = opts.OptimizationEnabled(); if (optLocalAssertionProp) { // // Initialize for local assertion prop // optAssertionInit(true); } if (!compEnregLocals()) { // Morph is checking if lvDoNotEnregister is already set for some optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // this flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. 
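        // (This marks every local as do-not-enregister up front.)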
lvSetMinOptsDoNotEnreg(); } /*------------------------------------------------------------------------- * Process all basic blocks in the function */ BasicBlock* block = fgFirstBB; noway_assert(block); do { #ifdef DEBUG if (verbose) { printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName); } #endif if (optLocalAssertionProp) { // // Clear out any currently recorded assertion candidates // before processing each basic block, // also we must handle QMARK-COLON specially // optAssertionReset(0); } // Make the current basic block address available globally. compCurBB = block; // Process all statement trees in the basic block. fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { fgMergeBlockReturn(block); } } block = block->bbNext; } while (block != nullptr); // We are done with the global morphing phase fgGlobalMorph = false; compCurBB = nullptr; // Under OSR, we no longer need to specially protect the original method entry // if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED)) { JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum); assert(fgEntryBB->bbRefs > 0); fgEntryBB->bbRefs--; // We don't need to remember this block anymore. fgEntryBB = nullptr; } #ifdef DEBUG if (verboseTrees) { fgDispBasicBlocks(true); } #endif } //------------------------------------------------------------------------ // fgMergeBlockReturn: assign the block return value (if any) into the single return temp // and branch to the single return block. // // Arguments: // block - the block to process. // // Notes: // A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN. // For example a method returning void could have an empty block with jump kind BBJ_RETURN. // Such blocks do materialize as part of in-lining. // // A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN. // It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC. // For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal // is BAD_VAR_NUM. // void Compiler::fgMergeBlockReturn(BasicBlock* block) { assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. Statement* lastStmt = block->lastStmt(); GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr; if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0)) { // This return was generated during epilog merging, so leave it alone } else { // We'll jump to the genReturnBB. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else #endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; } if (genReturnLocal != BAD_VAR_NUM) { // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal. // Method must be returning a value other than TYP_VOID. 
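            // Schematically, "RETURN(<expr>)" here becomes "ASG(<genReturnLocal>, <expr>)",
            // and the block now jumps to genReturnBB, which performs the single shared return.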
noway_assert(compMethodHasRetVal()); // This block must be ending with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); noway_assert(ret != nullptr); // GT_RETURN must have non-null operand as the method is returning the value assigned to // genReturnLocal noway_assert(ret->OperGet() == GT_RETURN); noway_assert(ret->gtGetOp1() != nullptr); Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); } else if (tree->OperIsInitBlkOp()) { tree = fgMorphInitBlock(tree); } if (pAfterStatement == lastStmt) { lastStmt->SetRootNode(tree); } else { // gtNewTempAssign inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); lastStmt = newStmt; } } else if (ret != nullptr && ret->OperGet() == GT_RETURN) { // This block ends with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); // Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn // block noway_assert(ret->TypeGet() == TYP_VOID); noway_assert(ret->gtGetOp1() == nullptr); fgRemoveStmt(block, lastStmt); } JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum); DISPBLOCK(block); if (block->hasProfileWeight()) { weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT; weight_t const newWeight = oldWeight + block->bbWeight; JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight, block->bbNum, genReturnBB->bbNum); genReturnBB->setBBProfileWeight(newWeight); DISPBLOCK(genReturnBB); } } } /***************************************************************************** * * Make some decisions about the kind of code to generate. */ void Compiler::fgSetOptions() { #ifdef DEBUG /* Should we force fully interruptible code ? */ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30)) { noway_assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); } #endif if (opts.compDbgCode) { assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); // debugging is easier this way ... } /* Assume we won't need an explicit stack frame if this is allowed */ if (compLocallocUsed) { codeGen->setFramePointerRequired(true); } #ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); #endif // TARGET_X86 if (!opts.genFPopt) { codeGen->setFramePointerRequired(true); } // Assert that the EH table has been initialized by now. Note that // compHndBBtabAllocCount never decreases; it is a high-water mark // of table allocation. In contrast, compHndBBtabCount does shrink // if we delete a dead EH region, and if it shrinks to zero, the // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); #ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. This would require a few more changes for X86 to use // compHndBBtabCount (the current number of EH clauses) instead of // info.compXcptnsCount (the number of EH clauses in IL), such as // in ehNeedsShadowSPslots(). 
This is because sometimes the IL has
    // an EH clause that we delete as statically dead code before we
    // get here, leaving no EH clauses left, and thus no requirement
    // to use a frame pointer because of EH. But until all the code uses
    // the same test, leave info.compXcptnsCount here.
    if (info.compXcptnsCount > 0)
    {
        codeGen->setFramePointerRequiredEH(true);
    }

#else // !TARGET_X86

    if (compHndBBtabCount > 0)
    {
        codeGen->setFramePointerRequiredEH(true);
    }

#endif // TARGET_X86

#ifdef UNIX_X86_ABI
    if (info.compXcptnsCount > 0)
    {
        assert(!codeGen->isGCTypeFixed());
        // Enforce fully interruptible codegen for funclet unwinding
        SetInterruptible(true);
    }
#endif // UNIX_X86_ABI

    if (compMethodRequiresPInvokeFrame())
    {
        codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
    }

    if (info.compPublishStubParam)
    {
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    if (compIsProfilerHookNeeded())
    {
        codeGen->setFramePointerRequired(true);
    }

    if (info.compIsVarArgs)
    {
        // Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    if (lvaReportParamTypeArg())
    {
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    // printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}

/*****************************************************************************/

GenTree* Compiler::fgInitThisClass()
{
    noway_assert(!compIsForInlining());

    CORINFO_LOOKUP_KIND kind;
    info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);

    if (!kind.needsRuntimeLookup)
    {
        return fgGetSharedCCtor(info.compClassHnd);
    }
    else
    {
#ifdef FEATURE_READYTORUN
        // Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
        if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
        {
            CORINFO_RESOLVED_TOKEN resolvedToken;
            memset(&resolvedToken, 0, sizeof(resolvedToken));

            // We are in a shared method body, but maybe we don't need a runtime lookup after all.
            // This covers the case of a generic method on a non-generic type.
            if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
            {
                resolvedToken.hClass = info.compClassHnd;
                return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
            }

            // We need a runtime lookup.
            GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);

            // CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
            // base of the class that owns the method being compiled". If we're in this method, it means we're not
            // inlining and there's no ambiguity.
            return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
                                             gtNewCallArgs(ctxTree), &kind);
        }
#endif

        // Collectible types require that for shared generic code, if we use the generic context parameter
        // that we report it. (This is a conservative approach, we could detect some cases particularly when the
        // context parameter is this that we don't need the eager reporting logic.)
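        // (The switch below handles the three shared-generics context kinds:
        // CORINFO_LOOKUP_THISOBJ, CORINFO_LOOKUP_CLASSPARAM and CORINFO_LOOKUP_METHODPARAM.)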
lvaGenericsContextInUse = true; switch (kind.runtimeLookupKind) { case CORINFO_LOOKUP_THISOBJ: { // This code takes a this pointer; but we need to pass the static method desc to get the right point in // the hierarchy GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF); vtTree->gtFlags |= GTF_VAR_CONTEXT; // Vtable pointer of this object vtTree = gtNewMethodTableLookup(vtTree); GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd); return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd)); } case CORINFO_LOOKUP_CLASSPARAM: { GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); vtTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree)); } case CORINFO_LOOKUP_METHODPARAM: { GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); methHndTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(gtNewIconNode(0), methHndTree)); } default: noway_assert(!"Unknown LOOKUP_KIND"); UNREACHABLE(); } } } #ifdef DEBUG /***************************************************************************** * * Tree walk callback to make sure no GT_QMARK nodes are present in the tree, * except for the allowed ? 1 : 0; pattern. */ Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data) { if ((*tree)->OperGet() == GT_QMARK) { fgCheckQmarkAllowedForm(*tree); } return WALK_CONTINUE; } void Compiler::fgCheckQmarkAllowedForm(GenTree* tree) { assert(tree->OperGet() == GT_QMARK); assert(!"Qmarks beyond morph disallowed."); } /***************************************************************************** * * Verify that the importer has created GT_QMARK nodes in a way we can * process them. The following is allowed: * * 1. A top level qmark. Top level qmark is of the form: * a) (bool) ? (void) : (void) OR * b) V0N = (bool) ? (type) : (type) * * 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child * of either op1 of colon or op2 of colon but not a child of any other * operator. */ void Compiler::fgPreExpandQmarkChecks(GenTree* expr) { GenTree* topQmark = fgGetTopLevelQmark(expr); // If the top level Qmark is null, then scan the tree to make sure // there are no qmarks within it. if (topQmark == nullptr) { fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } else { // We could probably expand the cond node also, but don't think the extra effort is necessary, // so let's just assert the cond node of a top level qmark doesn't have further top level qmarks. fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2); } } #endif // DEBUG /***************************************************************************** * * Get the top level GT_QMARK node in a given "expr", return NULL if such a * node is not present. If the top level GT_QMARK node is assigned to a * GT_LCL_VAR, then return the lcl node in ppDst. 
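 *
 *  For example (illustrative shapes), both of the following are accepted:
 *      QMARK(cond, COLON(t, f))                   -- bare top-level qmark
 *      ASG(LCL_VAR V00, QMARK(cond, COLON(t, f))) -- V00 is returned via ppDst
 *  Any other shape yields NULL.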
 *
 */
GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
    if (ppDst != nullptr)
    {
        *ppDst = nullptr;
    }

    GenTree* topQmark = nullptr;

    if (expr->gtOper == GT_QMARK)
    {
        topQmark = expr;
    }
    else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
             expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
    {
        topQmark = expr->AsOp()->gtOp2;
        if (ppDst != nullptr)
        {
            *ppDst = expr->AsOp()->gtOp1;
        }
    }

    return topQmark;
}

/*********************************************************************************
 *
 *  For a castclass helper call,
 *  Importer creates the following tree:
 *      tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
 *
 *  This method splits the qmark expression created by the importer into the
 *  following blocks: (block, asg, cond1, cond2, helper, remainder)
 *  Notice that op1 is the result for both the conditions. So we coalesce these
 *  assignments into a single block instead of two blocks resulting in a nested diamond.
 *
 *                       +---------->-----------+
 *                       |          |           |
 *                       ^          ^           v
 *                       |          |           |
 *  block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
 *
 *  We expect to achieve the following codegen:
 *     mov      rsi, rdx                           tmp = op1                  // asgBlock
 *     test     rsi, rsi                           goto skip if tmp == null ? // cond1Block
 *     je       SKIP
 *     mov      rcx, 0x76543210                    cns = op2                  // cond2Block
 *     cmp      qword ptr [rsi], rcx               goto skip if *tmp == op2
 *     je       SKIP
 *     call     CORINFO_HELP_CHKCASTCLASS_SPECIAL  tmp = helper(cns, tmp)     // helperBlock
 *     mov      rsi, rax
 *  SKIP:                                                                     // remainderBlock
 *     tmp has the result.
 *
 */
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
        fgDispBasicBlocks(block, block, true);
    }
#endif // DEBUG

    GenTree* expr = stmt->GetRootNode();

    GenTree* dst   = nullptr;
    GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
    noway_assert(dst != nullptr);

    assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);

    // Get cond, true, false exprs for the qmark.
    GenTree* condExpr  = qmark->gtGetOp1();
    GenTree* trueExpr  = qmark->gtGetOp2()->AsColon()->ThenNode();
    GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();

    // Get cond, true, false exprs for the nested qmark.
    GenTree* nestedQmark = falseExpr;
    GenTree* cond2Expr;
    GenTree* true2Expr;
    GenTree* false2Expr;

    if (nestedQmark->gtOper == GT_QMARK)
    {
        cond2Expr  = nestedQmark->gtGetOp1();
        true2Expr  = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
        false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
    }
    else
    {
        // This is a rare case that arises when we are doing minopts and encounter isinst of null.
        // gtFoldExpr was still able to optimize away part of the tree (but not all).
        // That means it does not match our pattern.
        // Rather than write code to handle this case, just fake up some nodes to make it match the common
        // case. Synthesize a comparison that is always true, and for the result-on-true, use the
        // entire subtree we expected to be the nested question op.
        cond2Expr  = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
        true2Expr  = nestedQmark;
        false2Expr = gtNewIconNode(0, TYP_I_IMPL);
    }
    assert(false2Expr->OperGet() == trueExpr->OperGet());

    // Create the chain of blocks. See method header comment.
    // The order of blocks after this is the following:
    //     block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... remainderBlock
    //
    // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
    // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
    // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
    // remainderBlock will still be GC safe.
    BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
    BasicBlock*     remainderBlock = fgSplitBlockAfterStatement(block, stmt);
    fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.

    BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
    BasicBlock* cond2Block  = fgNewBBafter(BBJ_COND, block, true);
    BasicBlock* cond1Block  = fgNewBBafter(BBJ_COND, block, true);
    BasicBlock* asgBlock    = fgNewBBafter(BBJ_NONE, block, true);

    remainderBlock->bbFlags |= propagateFlags;

    // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
    // If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
    if ((block->bbFlags & BBF_INTERNAL) == 0)
    {
        helperBlock->bbFlags &= ~BBF_INTERNAL;
        cond2Block->bbFlags &= ~BBF_INTERNAL;
        cond1Block->bbFlags &= ~BBF_INTERNAL;
        asgBlock->bbFlags &= ~BBF_INTERNAL;
        helperBlock->bbFlags |= BBF_IMPORTED;
        cond2Block->bbFlags |= BBF_IMPORTED;
        cond1Block->bbFlags |= BBF_IMPORTED;
        asgBlock->bbFlags |= BBF_IMPORTED;
    }

    // Chain the flow correctly.
    fgAddRefPred(asgBlock, block);
    fgAddRefPred(cond1Block, asgBlock);
    fgAddRefPred(cond2Block, cond1Block);
    fgAddRefPred(helperBlock, cond2Block);
    fgAddRefPred(remainderBlock, helperBlock);
    fgAddRefPred(remainderBlock, cond1Block);
    fgAddRefPred(remainderBlock, cond2Block);

    cond1Block->bbJumpDest = remainderBlock;
    cond2Block->bbJumpDest = remainderBlock;

    // Set the weights; some are guesses.
    asgBlock->inheritWeight(block);
    cond1Block->inheritWeight(block);
    cond2Block->inheritWeightPercentage(cond1Block, 50);
    helperBlock->inheritWeightPercentage(cond2Block, 50);

    // Append cond1 as JTRUE to cond1Block
    GenTree*   jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr);
    Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(cond1Block, jmpStmt);

    // Append cond2 as JTRUE to cond2Block
    jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr);
    jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(cond2Block, jmpStmt);

    // AsgBlock should get tmp = op1 assignment.
    trueExpr            = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
    Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(asgBlock, trueStmt);

    // Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper.
    gtReverseCond(cond2Expr);
    GenTree*   helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr);
    Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo());
    fgInsertStmtAtEnd(helperBlock, helperStmt);

    // Finally remove the nested qmark stmt.
    fgRemoveStmt(block, stmt);

    if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN))
    {
        fgConvertBBToThrowBB(helperBlock);
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum);
        fgDispBasicBlocks(block, remainderBlock, true);
    }
#endif // DEBUG
}

/*****************************************************************************
 *
 *  Expand a statement with a top level qmark node. There are three cases, based
 *  on whether the qmark has both "true" and "false" arms, or just one of them.
 *
 *     S0;
 *     C ? T : F;
 *     S1;
 *
 *     Generates ===>
 *
 *                       bbj_always
 *                       +---->------+
 *                 false |           |
 *     S0 -->-- ~C -->-- T   F -->-- S1
 *                  |            |
 *                  +--->--------+
 *                  bbj_cond(true)
 *
 *     -----------------------------------------
 *
 *     S0;
 *     C ? T : NOP;
 *     S1;
 *
 *     Generates ===>
 *
 *                 false
 *     S0 -->-- ~C -->-- T -->-- S1
 *                  |            |
 *                  +-->---------+
 *                  bbj_cond(true)
 *
 *     -----------------------------------------
 *
 *     S0;
 *     C ? NOP : F;
 *     S1;
 *
 *     Generates ===>
 *
 *                false
 *     S0 -->-- C -->-- F -->-- S1
 *                 |           |
 *                 +-->--------+
 *                 bbj_cond(true)
 *
 *  If the qmark assigns to a variable, then create tmps for "then"
 *  and "else" results and assign the temp to the variable as a writeback step.
 */
void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
{
    GenTree* expr = stmt->GetRootNode();

    // Retrieve the Qmark node to be expanded.
    GenTree* dst   = nullptr;
    GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
    if (qmark == nullptr)
    {
        return;
    }

    if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF)
    {
        fgExpandQmarkForCastInstOf(block, stmt);
        return;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum);
        fgDispBasicBlocks(block, block, true);
    }
#endif // DEBUG

    // Retrieve the operands.
    GenTree* condExpr  = qmark->gtGetOp1();
    GenTree* trueExpr  = qmark->gtGetOp2()->AsColon()->ThenNode();
    GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();

    assert(!varTypeIsFloating(condExpr->TypeGet()));

    bool hasTrueExpr  = (trueExpr->OperGet() != GT_NOP);
    bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
    assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!

    // Create remainder, cond and "else" blocks. After this, the blocks are in this order:
    //     block ... condBlock ... elseBlock ... remainderBlock
    //
    // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
    // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
    // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
    // remainderBlock will still be GC safe.
    BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
    BasicBlock*     remainderBlock = fgSplitBlockAfterStatement(block, stmt);
    fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.

    BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
    BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);

    // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
    // If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
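    // (When both a "then" and an "else" arm exist, the thenBlock created below
    // receives the same internal/imported adjustment.)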
if ((block->bbFlags & BBF_INTERNAL) == 0) { condBlock->bbFlags &= ~BBF_INTERNAL; elseBlock->bbFlags &= ~BBF_INTERNAL; condBlock->bbFlags |= BBF_IMPORTED; elseBlock->bbFlags |= BBF_IMPORTED; } remainderBlock->bbFlags |= propagateFlags; condBlock->inheritWeight(block); fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) { // bbj_always // +---->------+ // false | | // S0 -->-- ~C -->-- T F -->-- S1 // | | // +--->--------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = elseBlock; thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->bbJumpDest = remainderBlock; if ((block->bbFlags & BBF_INTERNAL) == 0) { thenBlock->bbFlags &= ~BBF_INTERNAL; thenBlock->bbFlags |= BBF_IMPORTED; } fgAddRefPred(thenBlock, condBlock); fgAddRefPred(remainderBlock, thenBlock); thenBlock->inheritWeightPercentage(condBlock, 50); elseBlock->inheritWeightPercentage(condBlock, 50); } else if (hasTrueExpr) { // false // S0 -->-- ~C -->-- T -->-- S1 // | | // +-->-------------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; thenBlock->inheritWeightPercentage(condBlock, 50); } else if (hasFalseExpr) { // false // S0 -->-- C -->-- F -->-- S1 // | | // +-->------------+ // bbj_cond(true) // condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); elseBlock->inheritWeightPercentage(condBlock, 50); } GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1()); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(condBlock, jmpStmt); // Remove the original qmark statement. fgRemoveStmt(block, stmt); // Since we have top level qmarks, we either have a dst for it in which case // we need to create tmps for true and falseExprs, else just don't bother // assigning. unsigned lclNum = BAD_VAR_NUM; if (dst != nullptr) { assert(dst->gtOper == GT_LCL_VAR); lclNum = dst->AsLclVar()->GetLclNum(); } else { assert(qmark->TypeGet() == TYP_VOID); } if (hasTrueExpr) { if (dst != nullptr) { trueExpr = gtNewTempAssign(lclNum, trueExpr); } Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(thenBlock, trueStmt); } // Assign the falseExpr into the dst or tmp, insert in elseBlock if (hasFalseExpr) { if (dst != nullptr) { falseExpr = gtNewTempAssign(lclNum, falseExpr); } Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(elseBlock, falseStmt); } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand GT_QMARK nodes from the flow graph into basic blocks. * */ void Compiler::fgExpandQmarkNodes() { if (compQmarkUsed) { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); #ifdef DEBUG fgPreExpandQmarkChecks(expr); #endif fgExpandQmarkStmt(block, stmt); } } #ifdef DEBUG fgPostExpandQmarkChecks(); #endif } compQmarkRationalized = true; } #ifdef DEBUG /***************************************************************************** * * Make sure we don't have any more GT_QMARK nodes. 
* */ void Compiler::fgPostExpandQmarkChecks() { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } } } #endif /***************************************************************************** * * Promoting struct locals */ void Compiler::fgPromoteStructs() { #ifdef DEBUG if (verbose) { printf("*************** In fgPromoteStructs()\n"); } #endif // DEBUG if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE)) { JITDUMP(" promotion opt flag not enabled\n"); return; } if (fgNoStructPromotion) { JITDUMP(" promotion disabled by JitNoStructPromotion\n"); return; } #if 0 // The code in this #if has been useful in debugging struct promotion issues, by // enabling selective enablement of the struct promotion optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("structpromohashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); } char* histr = getenv("structpromohashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); } if (methHash < methHashLo || methHash > methHashHi) { return; } else { printf("Promoting structs for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // in our logic this causes a flush } #endif // DEBUG #endif // 0 if (info.compIsVarArgs) { JITDUMP(" promotion disabled because of varargs\n"); return; } #ifdef DEBUG if (verbose) { printf("\nlvaTable before fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; // // Loop through the original lvaTable. Looking for struct locals to be promoted. // lvaStructPromotionInfo structPromotionInfo; bool tooManyLocalsReported = false; // Clear the structPromotionHelper, since it is used during inlining, at which point it // may be conservative about looking up SIMD info. // We don't want to preserve those conservative decisions for the actual struct promotion. structPromotionHelper->Clear(); for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { // Whether this var got promoted bool promotedVar = false; LclVarDsc* varDsc = lvaGetDesc(lclNum); // If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote // its fields. Instead, we will attempt to enregister the entire struct. if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc))) { varDsc->lvRegStruct = true; } // Don't promote if we have reached the tracking limit. else if (lvaHaveManyLocals()) { // Print the message first time when we detected this condition if (!tooManyLocalsReported) { JITDUMP("Stopped promoting struct fields, due to too many locals.\n"); } tooManyLocalsReported = true; } else if (varTypeIsStruct(varDsc)) { assert(structPromotionHelper != nullptr); promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum); } if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed) { // Even if we have not used this in a SIMD intrinsic, if it is not being promoted, // we will treat it as a reg struct. 
varDsc->lvRegStruct = true; } } #ifdef DEBUG if (verbose) { printf("\nlvaTable after fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG } void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_FIELD); GenTreeField* field = tree->AsField(); GenTree* objRef = field->GetFldObj(); GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr; noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))); /* Is this an instance data member? */ if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)) { unsigned lclNum = obj->AsLclVarCommon()->GetLclNum(); const LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(obj)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = field->gtFldOffset; unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); if (fieldLclIndex == BAD_VAR_NUM) { // Access a promoted struct's field with an offset that doesn't correspond to any field. // It can happen if the struct was cast to another struct with different offsets. return; } const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex); var_types fieldType = fieldDsc->TypeGet(); assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type. if (tree->TypeGet() != fieldType) { if (tree->TypeGet() != TYP_STRUCT) { // This is going to be an incorrect instruction promotion. // For example when we try to read int as long. return; } if (field->gtFldHnd != fieldDsc->lvFieldHnd) { CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr; CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass); CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass); if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass) { // Access the promoted field with a different class handle, can't check that types match. return; } // Access the promoted field as a field of a non-promoted struct with the same class handle. } else { // As we already checked this above, we must have a tree with a TYP_STRUCT type // assert(tree->TypeGet() == TYP_STRUCT); // The field tree accesses it as a struct, but the promoted LCL_VAR field // says that it has another type. This happens when struct promotion unwraps // a single field struct to get to its ultimate type. // // Note that currently, we cannot have a promoted LCL_VAR field with a struct type. // // This mismatch in types can lead to problems for some parent node type like GT_RETURN. // So we check the parent node and only allow this optimization when we have // a GT_ADDR or a GT_ASG. // // Note that for a GT_ASG we have to do some additional work, // see below after the SetOper(GT_LCL_VAR) // if (!parent->OperIs(GT_ADDR, GT_ASG)) { // Don't transform other operations such as GT_RETURN // return; } #ifdef DEBUG // This is an additional DEBUG-only sanity check // assert(structPromotionHelper != nullptr); structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType); #endif // DEBUG } } tree->SetOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(fieldLclIndex); tree->gtType = fieldType; tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`. 
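                // The field access has now been retyped in place: schematically,
                // "FIELD(ADDR(LCL_VAR V01), someField)" is now "LCL_VAR V03", where V03
                // is the promoted field local for that offset (illustrative numbers).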
if (parent->gtOper == GT_ASG) { // If we are changing the left side of an assignment, we need to set // these two flags: // if (parent->AsOp()->gtOp1 == tree) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } // Promotion of struct containing struct fields where the field // is a struct with a single pointer sized scalar type field: in // this case struct promotion uses the type of the underlying // scalar field as the type of struct field instead of recursively // promoting. This can lead to a case where we have a block-asgn // with its RHS replaced with a scalar type. Mark RHS value as // DONT_CSE so that assertion prop will not do const propagation. // The reason this is required is that if RHS of a block-asg is a // constant, then it is interpreted as init-block incorrectly. // // TODO - This can also be avoided if we implement recursive struct // promotion, tracked by #10019. if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree)) { tree->gtFlags |= GTF_DONT_CSE; } } #ifdef DEBUG if (verbose) { printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex); } #endif // DEBUG } } else { // Normed struct // A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if // the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8 // bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However, // there is one extremely rare case where that won't be true. An enum type is a special value type // that contains exactly one element of a primitive integer type (that, for CLS programs is named // "value__"). The VM tells us that a local var of that enum type is the primitive type of the // enum's single field. It turns out that it is legal for IL to access this field using ldflda or // ldfld. For example: // // .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum // { // .field public specialname rtspecialname int16 value__ // .field public static literal valuetype mynamespace.e_t one = int16(0x0000) // } // .method public hidebysig static void Main() cil managed // { // .locals init (valuetype mynamespace.e_t V_0) // ... // ldloca.s V_0 // ldflda int16 mynamespace.e_t::value__ // ... // } // // Normally, compilers will not generate the ldflda, since it is superfluous. // // In the example, the lclVar is short, but the JIT promotes all trees using this local to the // "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type // mismatch like this, don't do this morphing. The local var may end up getting marked as // address taken, and the appropriate SHORT load will be done from memory in that case. 
if (tree->TypeGet() == obj->TypeGet())
            {
                tree->ChangeOper(GT_LCL_VAR);
                tree->AsLclVarCommon()->SetLclNum(lclNum);
                tree->gtFlags &= GTF_NODE_MASK;

                if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
                {
                    tree->gtFlags |= GTF_VAR_DEF;
                    tree->gtFlags |= GTF_DONT_CSE;
                }
#ifdef DEBUG
                if (verbose)
                {
                    printf("Replacing the field in normed struct with local var V%02u\n", lclNum);
                }
#endif // DEBUG
            }
        }
    }
}

void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent)
{
    noway_assert(tree->OperGet() == GT_LCL_FLD);

    unsigned   lclNum = tree->AsLclFld()->GetLclNum();
    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    if (varTypeIsStruct(varDsc))
    {
        if (varDsc->lvPromoted)
        {
            // Promoted struct
            unsigned   fldOffset     = tree->AsLclFld()->GetLclOffs();
            unsigned   fieldLclIndex = 0;
            LclVarDsc* fldVarDsc     = nullptr;

            if (fldOffset != BAD_VAR_NUM)
            {
                fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
                noway_assert(fieldLclIndex != BAD_VAR_NUM);
                fldVarDsc = lvaGetDesc(fieldLclIndex);
            }

            var_types treeType  = tree->TypeGet();
            var_types fieldType = fldVarDsc->TypeGet();
            if (fldOffset != BAD_VAR_NUM &&
                ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1)))
            {
                // There is an existing sub-field we can use.
                tree->AsLclFld()->SetLclNum(fieldLclIndex);

                // The field must be an enregisterable type; otherwise it would not be a promoted field.
                // The tree type may not match, e.g. for return types that have been morphed, but both
                // must be enregisterable types.
                assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType));

                tree->ChangeOper(GT_LCL_VAR);
                assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex);
                tree->gtType = fldVarDsc->TypeGet();

                if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
                {
                    tree->gtFlags |= GTF_VAR_DEF;
                    tree->gtFlags |= GTF_DONT_CSE;
                }
                JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex);
            }
            else
            {
                // There is no existing field that has all the parts that we need,
                // so we must ensure that the struct lives in memory.
                lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));

#ifdef DEBUG
                // We can't convert this guy to a float because he really does have his
                // address taken.
                varDsc->lvKeepType = 1;
#endif // DEBUG
            }
        }
        else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc)))
        {
            assert(tree->AsLclFld()->GetLclOffs() == 0);
            tree->gtType = varDsc->TypeGet();
            tree->ChangeOper(GT_LCL_VAR);
            JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum);
        }
    }
}

//------------------------------------------------------------------------
// fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs

void Compiler::fgResetImplicitByRefRefCount()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In fgResetImplicitByRefRefCount()\n");
    }
#endif // DEBUG

    for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum)
    {
        LclVarDsc* varDsc = lvaGetDesc(lclNum);

        if (varDsc->lvIsImplicitByRef)
        {
            // Clear the ref count field; fgMarkAddressTakenLocals will increment it per
            // appearance of implicit-by-ref param so that call arg morphing can do an
            // optimization for single-use implicit-by-ref params whose single use is as
            // an outgoing call argument.
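            // (Both the count and the weighted count are cleared: early ref counting
            // reuses the weighted count to track how many appearances are call
            // arguments; see fgRetypeImplicitByRefArgs.)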
varDsc->setLvRefCnt(0, RCS_EARLY); varDsc->setLvRefCntWtd(0, RCS_EARLY); } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). Also choose (based on address-exposed analysis) // which struct promotions of implicit byrefs to keep or discard. // For those which are kept, insert the appropriate initialization code. // For those which are to be discarded, annotate the promoted field locals // so that fgMorphImplicitByRefArgs will know to rewrite their appearances // using indirections off the pointer parameters. void Compiler::fgRetypeImplicitByRefArgs() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgRetypeImplicitByRefArgs()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { unsigned size; if (varDsc->lvSize() > REGSIZE_BYTES) { size = varDsc->lvSize(); } else { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); size = info.compCompHnd->getClassSize(typeHnd); } if (varDsc->lvPromoted) { // This implicit-by-ref was promoted; create a new temp to represent the // promoted struct before rewriting this parameter as a pointer. unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref")); lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(newLclNum); } // Update varDsc since lvaGrabTemp might have re-allocated the var dsc array. varDsc = lvaGetDesc(lclNum); // Copy the struct promotion annotations to the new temp. LclVarDsc* newVarDsc = lvaGetDesc(newLclNum); newVarDsc->lvPromoted = true; newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart; newVarDsc->lvFieldCnt = varDsc->lvFieldCnt; newVarDsc->lvContainsHoles = varDsc->lvContainsHoles; newVarDsc->lvCustomLayout = varDsc->lvCustomLayout; #ifdef DEBUG newVarDsc->lvKeepType = true; #endif // DEBUG // Propagate address-taken-ness and do-not-enregister-ness. newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason())); newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister; newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr; newVarDsc->lvSingleDef = varDsc->lvSingleDef; newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate; newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef; #ifdef DEBUG newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason()); #endif // DEBUG // If the promotion is dependent, the promoted temp would just be committed // to memory anyway, so we'll rewrite its appearances to be indirections // through the pointer parameter, the same as we'd do for this // parameter if it weren't promoted at all (otherwise the initialization // of the new temp would just be a needless memcpy at method entry). // // Otherwise, see how many appearances there are. We keep two early ref counts: total // number of references to the struct or some field, and how many of these are // arguments to calls. We undo promotion unless we see enough non-call uses. 
// const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); const unsigned nonCallAppearances = totalAppearances - callAppearances; bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) || (nonCallAppearances <= varDsc->lvFieldCnt)); #ifdef DEBUG // Above is a profitability heuristic; either value of // undoPromotion should lead to correct code. So, // under stress, make different decisions at times. if (compStressCompile(STRESS_BYREF_PROMOTION, 25)) { undoPromotion = !undoPromotion; JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum, undoPromotion ? "" : "NOT"); } #endif // DEBUG JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n", undoPromotion ? "Undoing" : "Keeping", lclNum, (lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "", totalAppearances, nonCallAppearances, varDsc->lvFieldCnt); if (!undoPromotion) { // Insert IR that initializes the temp from the parameter. // LHS is a simple reference to the temp. fgEnsureFirstBBisScratch(); GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType); // RHS is an indirection (using GT_OBJ) off the parameter. GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size)); GenTree* assign = gtNewAssignNode(lhs, rhs); fgNewStmtAtBeg(fgFirstBB, assign); } // Update the locals corresponding to the promoted fields. unsigned fieldLclStart = varDsc->lvFieldLclStart; unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); if (undoPromotion) { // Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs // will know to rewrite appearances of this local. assert(fieldVarDsc->lvParentLcl == lclNum); } else { // Set the new parent. fieldVarDsc->lvParentLcl = newLclNum; } fieldVarDsc->lvIsParam = false; // The fields shouldn't inherit any register preferences from // the parameter which is really a pointer to the struct. fieldVarDsc->lvIsRegArg = false; fieldVarDsc->lvIsMultiRegArg = false; fieldVarDsc->SetArgReg(REG_NA); #if FEATURE_MULTIREG_ARGS fieldVarDsc->SetOtherArgReg(REG_NA); #endif } // Hijack lvFieldLclStart to record the new temp number. // It will get fixed up in fgMarkDemotedImplicitByRefArgs. varDsc->lvFieldLclStart = newLclNum; // Go ahead and clear lvFieldCnt -- either we're promoting // a replacement temp or we're not promoting this arg, and // in either case the parameter is now a pointer that doesn't // have these fields. varDsc->lvFieldCnt = 0; // Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs // whether references to the struct should be rewritten as // indirections off the pointer (not promoted) or references // to the new struct local (promoted). varDsc->lvPromoted = !undoPromotion; } else { // The "undo promotion" path above clears lvPromoted for args that struct // promotion wanted to promote but that aren't considered profitable to // rewrite. It hijacks lvFieldLclStart to communicate to // fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left // on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't // have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs // and fgMarkDemotedImplicitByRefArgs. assert(varDsc->lvFieldLclStart == 0); } // Since the parameter in this position is really a pointer, its type is TYP_BYREF. varDsc->lvType = TYP_BYREF; // Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF // make sure that the following flag is not set as these will force SSA to // exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa) // varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it. // The struct parameter may have had its address taken, but the pointer parameter // cannot -- any uses of the struct parameter's address are uses of the pointer // parameter's value, and there's no way for the MSIL to reference the pointer // parameter's address. So clear the address-taken bit for the parameter. varDsc->CleanAddressExposed(); varDsc->lvDoNotEnregister = 0; #ifdef DEBUG // This should not be converted to a double in stress mode, // because it is really a pointer varDsc->lvKeepType = 1; if (verbose) { printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum); } #endif // DEBUG } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion // asked to promote. Appearances of these have now been rewritten // (by fgMorphImplicitByRefArgs) using indirections from the pointer // parameter or references to the promotion temp, as appropriate. void Compiler::fgMarkDemotedImplicitByRefArgs() { JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n"); #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { JITDUMP("Clearing annotation for V%02d\n", lclNum); if (varDsc->lvPromoted) { // The parameter is simply a pointer now, so clear lvPromoted. It was left set // by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that // appearances of this arg needed to be rewritten to a new promoted struct local. varDsc->lvPromoted = false; // Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs // to tell fgMorphImplicitByRefArgs which local is the new promoted struct one. varDsc->lvFieldLclStart = 0; } else if (varDsc->lvFieldLclStart != 0) { // We created new temps to represent a promoted struct corresponding to this // parameter, but decided not to go through with the promotion and have // rewritten all uses as indirections off the pointer parameter. // We stashed the pointer to the new struct temp in lvFieldLclStart; make // note of that and clear the annotation. unsigned structLclNum = varDsc->lvFieldLclStart; varDsc->lvFieldLclStart = 0; // The temp struct is now unused; set flags appropriately so that we // won't allocate space for it on the stack. 
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum); structVarDsc->CleanAddressExposed(); #ifdef DEBUG structVarDsc->lvUnusedStruct = true; structVarDsc->lvUndoneStructPromotion = true; #endif // DEBUG unsigned fieldLclStart = structVarDsc->lvFieldLclStart; unsigned fieldCount = structVarDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum); // Fix the pointer to the parent local. LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); assert(fieldVarDsc->lvParentLcl == lclNum); fieldVarDsc->lvParentLcl = structLclNum; // The field local is now unused; set flags appropriately so that // we won't allocate stack space for it. fieldVarDsc->CleanAddressExposed(); } } } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** * * Morph irregular parameters * for x64 and ARM64 this means turning them into byrefs, adding extra indirs. */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { #if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; #else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; // Implicit byref morphing needs to know if the reference to the parameter is a // child of GT_ADDR or not, so this method looks one level down and does the // rewrite whenever a child is a reference to an implicit byref parameter. if (tree->gtOper == GT_ADDR) { if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true); changed = (morphedTree != nullptr); assert(!changed || (morphedTree == tree)); } } else { for (GenTree** pTree : tree->UseEdges()) { GenTree** pTreeCopy = pTree; GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { changed = true; *pTreeCopy = newChildTree; } } } } return changed; #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) { assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR))); assert(isAddr == (tree->gtOper == GT_ADDR)); GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree; unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* lclVarDsc = lvaGetDesc(lclNum); CORINFO_FIELD_HANDLE fieldHnd; unsigned fieldOffset = 0; var_types fieldRefType = TYP_UNKNOWN; if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will // re-invoke the traversal to mark address-taken locals. // So, we may encounter a tree that has already been transformed to TYP_BYREF. // If we do, leave it as-is. if (!varTypeIsStruct(lclVarTree)) { assert(lclVarTree->TypeGet() == TYP_BYREF); return nullptr; } else if (lclVarDsc->lvPromoted) { // fgRetypeImplicitByRefArgs created a new promoted struct local to represent this // arg. Rewrite this to refer to the new local. 
assert(lclVarDsc->lvFieldLclStart != 0); lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart); return tree; } fieldHnd = nullptr; } else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl)) { // This was a field reference to an implicit-by-reference struct parameter that was // dependently promoted; update it to a field reference off the pointer. // Grab the field handle from the struct field lclVar. fieldHnd = lclVarDsc->lvFieldHnd; fieldOffset = lclVarDsc->lvFldOffset; assert(fieldHnd != nullptr); // Update lclNum/lclVarDsc to refer to the parameter lclNum = lclVarDsc->lvParentLcl; lclVarDsc = lvaGetDesc(lclNum); fieldRefType = lclVarTree->TypeGet(); } else { // We only need to transform the 'marked' implicit by ref parameters return nullptr; } // This is no longer a def of the lclVar, even if it WAS a def of the struct. lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK); if (isAddr) { if (fieldHnd == nullptr) { // change &X into just plain X tree->ReplaceWith(lclVarTree, this); tree->gtType = TYP_BYREF; } else { // change &(X.f) [i.e. GT_ADDR of local for promoted arg field] // into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param] lclVarTree->AsLclVarCommon()->SetLclNum(lclNum); lclVarTree->gtType = TYP_BYREF; tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset); } #ifdef DEBUG if (verbose) { printf("Replacing address of implicit by ref struct parameter with byref:\n"); } #endif // DEBUG } else { // Change X into OBJ(X) or FIELD(X, f) var_types structType = tree->gtType; tree->gtType = TYP_BYREF; if (fieldHnd) { tree->AsLclVarCommon()->SetLclNum(lclNum); tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset); } else { tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree); if (structType == TYP_STRUCT) { gtSetObjGcInfo(tree->AsObj()); } } // TODO-CQ: If the VM ever stops violating the ABI and passing heap references // we could remove TGTANYWHERE tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE); #ifdef DEBUG if (verbose) { printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n"); } #endif // DEBUG } #ifdef DEBUG if (verbose) { gtDispTree(tree); } #endif // DEBUG return tree; } //------------------------------------------------------------------------ // fgAddFieldSeqForZeroOffset: // Associate a fieldSeq (with a zero offset) with the GenTree node 'addr' // // Arguments: // addr - A GenTree node // fieldSeqZero - a fieldSeq (with a zero offset) // // Notes: // Some GenTree nodes have internal fields that record the field sequence. // If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD // we can append the field sequence using the gtFieldSeq // If we have a GT_ADD of a GT_CNS_INT we can use the // fieldSeq from child node. // Otherwise we record 'fieldSeqZero' in the GenTree node using // a Map: GetFieldSeqStore() // When doing so we take care to preserve any existing zero field sequence // void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero) { // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Tunnel through any commas. const bool commaOnly = true; addr = addr->gtEffectiveVal(commaOnly); // We still expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); FieldSeqNode* fieldSeqUpdate = fieldSeqZero; GenTree* fieldSeqNode = addr; bool fieldSeqRecorded = false; #ifdef DEBUG if (verbose) { printf("\nfgAddFieldSeqForZeroOffset for"); gtDispAnyFieldSeq(fieldSeqZero); printf("\naddr (Before)\n"); gtDispNode(addr, nullptr, nullptr, false); gtDispCommonEndLine(addr); } #endif // DEBUG switch (addr->OperGet()) { case GT_CNS_INT: fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; break; case GT_ADDR: if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD) { fieldSeqNode = addr->AsOp()->gtOp1; GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld(); fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero); lclFld->SetFieldSeq(fieldSeqUpdate); fieldSeqRecorded = true; } break; case GT_ADD: if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp1; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp2; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } break; default: break; } if (fieldSeqRecorded == false) { // Record in the general zero-offset map. // The "addr" node might already be annotated with a zero-offset field sequence. FieldSeqNode* existingFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq)) { // Append the zero field sequences fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero); } // Overwrite the field sequence annotation for op1 GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite); fieldSeqRecorded = true; } #ifdef DEBUG if (verbose) { printf(" (After)\n"); gtDispNode(fieldSeqNode, nullptr, nullptr, false); gtDispCommonEndLine(fieldSeqNode); } #endif // DEBUG } #ifdef FEATURE_SIMD //----------------------------------------------------------------------------------- // fgMorphCombineSIMDFieldAssignments: // If the RHS of the input stmt is a read for simd vector X Field, then this function // will keep reading the next few stmts based on the vector size(2, 3, 4). // If the next stmts LHS are located contiguous and RHS are also located // contiguous, then we replace those statements with a copyblk. // // Argument: // block - BasicBlock*. block which stmt belongs to // stmt - Statement*. the stmt node we want to check // // return value: // if this function successfully optimized the stmts, then return true.
Otherwise // return false; bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt) { GenTree* tree = stmt->GetRootNode(); assert(tree->OperGet() == GT_ASG); GenTree* originalLHS = tree->AsOp()->gtOp1; GenTree* prevLHS = tree->AsOp()->gtOp1; GenTree* prevRHS = tree->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true); if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT) { // if the RHS is not from a SIMD vector field X, then there is no need to check further. return false; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); var_types simdType = getSIMDTypeForSize(simdSize); int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1; int remainingAssignments = assignmentsCount; Statement* curStmt = stmt->GetNextStmt(); Statement* lastStmt = stmt; while (curStmt != nullptr && remainingAssignments > 0) { GenTree* exp = curStmt->GetRootNode(); if (exp->OperGet() != GT_ASG) { break; } GenTree* curLHS = exp->gtGetOp1(); GenTree* curRHS = exp->gtGetOp2(); if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS)) { break; } remainingAssignments--; prevLHS = curLHS; prevRHS = curRHS; lastStmt = curStmt; curStmt = curStmt->GetNextStmt(); } if (remainingAssignments > 0) { // if the number of remaining assignments is greater than zero, then this means // that the assignments are not assigning to contiguous memory // locations from the same vector. return false; } #ifdef DEBUG if (verbose) { printf("\nFound contiguous assignments from a SIMD vector to memory.\n"); printf("From " FMT_BB ", stmt ", block->bbNum); printStmtID(stmt); printf(" to stmt"); printStmtID(lastStmt); printf("\n"); } #endif for (int i = 0; i < assignmentsCount; i++) { fgRemoveStmt(block, stmt->GetNextStmt()); } GenTree* dstNode; if (originalLHS->OperIs(GT_LCL_FLD)) { dstNode = originalLHS; dstNode->gtType = simdType; dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); // This may have changed a partial local field into full local field if (dstNode->IsPartialLclFld(this)) { dstNode->gtFlags |= GTF_VAR_USEASG; } else { dstNode->gtFlags &= ~GTF_VAR_USEASG; } } else { GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize); if (simdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(simdStructNode); } GenTree* copyBlkAddr = copyBlkDst; if (copyBlkAddr->gtOper == GT_LEA) { copyBlkAddr = copyBlkAddr->AsAddrMode()->Base(); } GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr(); if (localDst != nullptr) { setLclRelatedToSIMDIntrinsic(localDst); } if (simdStructNode->TypeGet() == TYP_BYREF) { assert(simdStructNode->OperIsLocal()); assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum())); simdStructNode = gtNewIndir(simdType, simdStructNode); } else { assert(varTypeIsSIMD(simdStructNode)); } dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst); } #ifdef DEBUG if (verbose) { printf("\n" FMT_BB " stmt ", block->bbNum); printStmtID(stmt); printf("(before)\n"); gtDispStmt(stmt); } #endif assert(!simdStructNode->CanCSE()); simdStructNode->ClearDoNotCSE(); tree = gtNewAssignNode(dstNode, simdStructNode); stmt->SetRootNode(tree); // Since we generated a new address node which didn't exist before, // we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all // local field access into LCL_FLDs, at that point we would be // combining 2 existing LCL_FLDs or 2 FIELDs that do not reference // a local and thus cannot result in a new address exposed local. fgMarkAddressExposedLocals(stmt); #ifdef DEBUG if (verbose) { printf("\nReplaced " FMT_BB " stmt", block->bbNum); printStmtID(stmt); printf("(after)\n"); gtDispStmt(stmt); } #endif return true; } #endif // FEATURE_SIMD //------------------------------------------------------------------------ // fgCheckStmtAfterTailCall: check that statements after the tail call stmt // candidate are in one of the expected forms, that are described below. // // Return Value: // 'true' if stmts are in the expected form, else 'false'. // bool Compiler::fgCheckStmtAfterTailCall() { // For void calls, we would have created a GT_CALL in the stmt list. // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)). // For calls returning structs, we would have a void call, followed by a void return. // For debuggable code, it would be an assignment of the call to a temp // We want to get rid of any of these extra trees, and just leave // the call. Statement* callStmt = fgMorphStmt; Statement* nextMorphStmt = callStmt->GetNextStmt(); // Check that the rest of the stmts in the block are in one of the following patterns: // 1) ret(void) // 2) ret(cast*(callResultLclVar)) // 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block // 4) nop if (nextMorphStmt != nullptr) { GenTree* callExpr = callStmt->GetRootNode(); if (callExpr->gtOper != GT_ASG) { // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar), // where lclVar was return buffer in the call for structs or simd. Statement* retStmt = nextMorphStmt; GenTree* retExpr = retStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); nextMorphStmt = retStmt->GetNextStmt(); } else { noway_assert(callExpr->gtGetOp1()->OperIsLocal()); unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum(); #if FEATURE_TAILCALL_OPT_SHARED_RETURN // We can have a chain of assignments from the call result to // various inline return spill temps. These are ok as long // as the last one ultimately provides the return value or is ignored. // // And if we're returning a small type we may see a cast // on the source side. while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP))) { if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP)) { nextMorphStmt = nextMorphStmt->GetNextStmt(); continue; } Statement* moveStmt = nextMorphStmt; GenTree* moveExpr = nextMorphStmt->GetRootNode(); GenTree* moveDest = moveExpr->gtGetOp1(); noway_assert(moveDest->OperIsLocal()); // Tunnel through any casts on the source side. GenTree* moveSource = moveExpr->gtGetOp2(); while (moveSource->OperIs(GT_CAST)) { noway_assert(!moveSource->gtOverflow()); moveSource = moveSource->gtGetOp1(); } noway_assert(moveSource->OperIsLocal()); // Verify we're just passing the value from one local to another // along the chain.
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum(); noway_assert(srcLclNum == callResultLclNumber); const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum(); callResultLclNumber = dstLclNum; nextMorphStmt = moveStmt->GetNextStmt(); } if (nextMorphStmt != nullptr) #endif { Statement* retStmt = nextMorphStmt; GenTree* retExpr = nextMorphStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); GenTree* treeWithLcl = retExpr->gtGetOp1(); while (treeWithLcl->gtOper == GT_CAST) { noway_assert(!treeWithLcl->gtOverflow()); treeWithLcl = treeWithLcl->gtGetOp1(); } noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum()); nextMorphStmt = retStmt->GetNextStmt(); } } } return nextMorphStmt == nullptr; } //------------------------------------------------------------------------ // fgCanTailCallViaJitHelper: check whether we can use the faster tailcall // JIT helper on x86. // // Return Value: // 'true' if we can; or 'false' if we should use the generic tailcall mechanism. // bool Compiler::fgCanTailCallViaJitHelper() { #if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN) // On anything except windows X86 we have no faster mechanism available. return false; #else // The JIT helper does not properly handle the case where localloc was used. if (compLocallocUsed) return false; return true; #endif } //------------------------------------------------------------------------ // fgMorphReduceAddOps: reduce successive variable adds into a single multiply, // e.g., i + i + i + i => i * 4. // // Arguments: // tree - tree for reduction // // Return Value: // reduced tree if pattern matches, original tree otherwise // GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree) { // ADD(_, V0) starts the pattern match. if (!tree->OperIs(GT_ADD) || tree->gtOverflow()) { return tree; } #ifndef TARGET_64BIT // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing // ADD ops with a helper function call. Don't apply optimization in that case. if (tree->TypeGet() == TYP_LONG) { return tree; } #endif GenTree* lclVarTree = tree->AsOp()->gtOp2; GenTree* consTree = tree->AsOp()->gtOp1; GenTree* op1 = consTree; GenTree* op2 = lclVarTree; if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2)) { return tree; } int foldCount = 0; unsigned lclNum = op2->AsLclVarCommon()->GetLclNum(); // Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum). while (true) { // ADD(lclNum, lclNum), end of tree if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount += 2; break; } // ADD(ADD(X, Y), lclNum), keep descending else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount++; op2 = op1->AsOp()->gtOp2; op1 = op1->AsOp()->gtOp1; } // Any other case is a pattern we won't attempt to fold for now. else { return tree; } } // V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize // accordingly consTree->BashToConst(foldCount, tree->TypeGet()); GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree); DEBUG_DESTROY_NODE(tree); return morphed; }
1
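The fgMorphReduceAddOps routine at the end of the file above rewrites a chain like i + i + i + i into i * 4 by walking the left spine of nested ADD nodes and counting repeated uses of one local. The standalone sketch below mirrors that pattern match on a toy expression tree; Node, Kind, and reduceAddChain are invented for illustration and are not JIT types.

// Toy illustration (not JIT code) of the add-chain fold in fgMorphReduceAddOps.
#include <cassert>
#include <cstdio>

enum class Kind { Add, Lcl, Cns, Mul };

struct Node
{
    Kind  kind;
    int   value = 0;   // local number for Lcl, constant value for Cns
    Node* op1   = nullptr;
    Node* op2   = nullptr;
};

// Returns a MUL node if the tree is ADD(ADD(...(lcl, lcl)...), lcl) over a
// single local; otherwise returns the tree unchanged.
Node* reduceAddChain(Node* tree)
{
    if (tree->kind != Kind::Add || tree->op2->kind != Kind::Lcl)
        return tree;

    const int lclNum    = tree->op2->value;
    int       foldCount = 0;
    Node*     op1       = tree->op1;
    Node*     op2       = tree->op2;

    while (true)
    {
        // ADD(lclNum, lclNum): end of the chain.
        if (op1->kind == Kind::Lcl && op1->value == lclNum &&
            op2->kind == Kind::Lcl && op2->value == lclNum)
        {
            foldCount += 2;
            break;
        }
        // ADD(ADD(x, y), lclNum): keep descending the left spine.
        if (op1->kind == Kind::Add && op2->kind == Kind::Lcl && op2->value == lclNum)
        {
            foldCount++;
            op2 = op1->op2;
            op1 = op1->op1;
            continue;
        }
        return tree; // any other shape: leave the tree alone
    }

    Node* lcl = new Node{Kind::Lcl, lclNum};
    Node* cns = new Node{Kind::Cns, foldCount};
    return new Node{Kind::Mul, 0, lcl, cns};
}

int main()
{
    // Build i + i + i + i as ADD(ADD(ADD(i, i), i), i).
    Node* i    = new Node{Kind::Lcl, 0};
    Node* tree = new Node{Kind::Add, 0,
                          new Node{Kind::Add, 0, new Node{Kind::Add, 0, i, i}, i}, i};
    Node* r    = reduceAddChain(tree);
    assert(r->kind == Kind::Mul && r->op2->value == 4); // i * 4
    printf("folded into V%02d * %d\n", r->op1->value, r->op2->value);
}

As in the real code, the innermost ADD(lcl, lcl) contributes two appearances and each enclosing ADD contributes one more, so the final constant is the total number of appearances of the local.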
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/jit/register_arg_convention.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __register_arg_convention__ #define __register_arg_convention__ class LclVarDsc; struct InitVarDscInfo { LclVarDsc* varDsc; unsigned varNum; unsigned intRegArgNum; unsigned floatRegArgNum; unsigned maxIntRegArgNum; unsigned maxFloatRegArgNum; bool hasRetBufArg; #ifdef TARGET_ARM // Support back-filling of FP parameters. This is similar to code in gtMorphArgs() that // handles arguments. regMaskTP fltArgSkippedRegMask; bool anyFloatStackArgs; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL // It is used to calculate argument stack size information in byte unsigned stackArgSize; bool hasMultiSlotStruct; #endif // FEATURE_FASTTAILCALL public: // set to initial values void Init(LclVarDsc* lvaTable, bool _hasRetBufArg, unsigned _maxIntRegArgNum, unsigned _maxFloatRegArgNum) { hasRetBufArg = _hasRetBufArg; varDsc = &lvaTable[0]; // the first argument LclVar 0 varNum = 0; // the first argument varNum 0 intRegArgNum = 0; floatRegArgNum = 0; maxIntRegArgNum = _maxIntRegArgNum; maxFloatRegArgNum = _maxFloatRegArgNum; #ifdef TARGET_ARM fltArgSkippedRegMask = RBM_NONE; anyFloatStackArgs = false; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL stackArgSize = 0; hasMultiSlotStruct = false; #endif // FEATURE_FASTTAILCALL } // return ref to current register arg for this type unsigned& regArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? floatRegArgNum : intRegArgNum; } // Allocate a set of contiguous argument registers. "type" is either an integer // type, indicating to use the integer registers, or a floating-point type, indicating // to use the floating-point registers. The actual type (TYP_FLOAT vs. TYP_DOUBLE) is // ignored. "numRegs" is the number of registers to allocate. Thus, on ARM, to allocate // a double-precision floating-point register, you need to pass numRegs=2. For an HFA, // pass the number of slots/registers needed. // This routine handles floating-point register back-filling on ARM. // Returns the first argument register of the allocated set. unsigned allocRegArg(var_types type, unsigned numRegs = 1); #ifdef TARGET_ARM // We are aligning the register to an ABI-required boundary, such as putting // double-precision floats in even-numbered registers, by skipping one register. // "requiredRegAlignment" is the amount to align to: 1 for no alignment (everything // is 1-aligned), 2 for "double" alignment. // Returns the number of registers skipped. unsigned alignReg(var_types type, unsigned requiredRegAlignment); #endif // TARGET_ARM // Return true if it is an enregisterable type and there is room. // Note that for "type", we only care if it is float or not. In particular, // "numRegs" must be "2" to allocate an ARM double-precision floating-point register. bool canEnreg(var_types type, unsigned numRegs = 1); // Set the fact that we have used up all remaining registers of 'type' // void setAllRegArgUsed(var_types type) { regArgNum(type) = maxRegArgNum(type); } #ifdef TARGET_ARM void setAnyFloatStackArgs() { anyFloatStackArgs = true; } bool existAnyFloatStackArgs() { return anyFloatStackArgs; } #endif // TARGET_ARM private: // return max register arg for this type unsigned maxRegArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? 
maxFloatRegArgNum : maxIntRegArgNum; } bool enoughAvailRegs(var_types type, unsigned numRegs = 1); void nextReg(var_types type, unsigned numRegs = 1) { regArgNum(type) = min(regArgNum(type) + numRegs, maxRegArgNum(type)); } }; #endif // __register_arg_convention__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __register_arg_convention__ #define __register_arg_convention__ class LclVarDsc; struct InitVarDscInfo { LclVarDsc* varDsc; unsigned varNum; unsigned intRegArgNum; unsigned floatRegArgNum; unsigned maxIntRegArgNum; unsigned maxFloatRegArgNum; bool hasRetBufArg; #ifdef TARGET_ARM // Support back-filling of FP parameters. This is similar to code in gtMorphArgs() that // handles arguments. regMaskTP fltArgSkippedRegMask; bool anyFloatStackArgs; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL // It is used to calculate argument stack size information in byte unsigned stackArgSize; #endif // FEATURE_FASTTAILCALL public: // set to initial values void Init(LclVarDsc* lvaTable, bool _hasRetBufArg, unsigned _maxIntRegArgNum, unsigned _maxFloatRegArgNum) { hasRetBufArg = _hasRetBufArg; varDsc = &lvaTable[0]; // the first argument LclVar 0 varNum = 0; // the first argument varNum 0 intRegArgNum = 0; floatRegArgNum = 0; maxIntRegArgNum = _maxIntRegArgNum; maxFloatRegArgNum = _maxFloatRegArgNum; #ifdef TARGET_ARM fltArgSkippedRegMask = RBM_NONE; anyFloatStackArgs = false; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL stackArgSize = 0; #endif // FEATURE_FASTTAILCALL } // return ref to current register arg for this type unsigned& regArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? floatRegArgNum : intRegArgNum; } // Allocate a set of contiguous argument registers. "type" is either an integer // type, indicating to use the integer registers, or a floating-point type, indicating // to use the floating-point registers. The actual type (TYP_FLOAT vs. TYP_DOUBLE) is // ignored. "numRegs" is the number of registers to allocate. Thus, on ARM, to allocate // a double-precision floating-point register, you need to pass numRegs=2. For an HFA, // pass the number of slots/registers needed. // This routine handles floating-point register back-filling on ARM. // Returns the first argument register of the allocated set. unsigned allocRegArg(var_types type, unsigned numRegs = 1); #ifdef TARGET_ARM // We are aligning the register to an ABI-required boundary, such as putting // double-precision floats in even-numbered registers, by skipping one register. // "requiredRegAlignment" is the amount to align to: 1 for no alignment (everything // is 1-aligned), 2 for "double" alignment. // Returns the number of registers skipped. unsigned alignReg(var_types type, unsigned requiredRegAlignment); #endif // TARGET_ARM // Return true if it is an enregisterable type and there is room. // Note that for "type", we only care if it is float or not. In particular, // "numRegs" must be "2" to allocate an ARM double-precision floating-point register. bool canEnreg(var_types type, unsigned numRegs = 1); // Set the fact that we have used up all remaining registers of 'type' // void setAllRegArgUsed(var_types type) { regArgNum(type) = maxRegArgNum(type); } #ifdef TARGET_ARM void setAnyFloatStackArgs() { anyFloatStackArgs = true; } bool existAnyFloatStackArgs() { return anyFloatStackArgs; } #endif // TARGET_ARM private: // return max register arg for this type unsigned maxRegArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? 
maxFloatRegArgNum : maxIntRegArgNum; } bool enoughAvailRegs(var_types type, unsigned numRegs = 1); void nextReg(var_types type, unsigned numRegs = 1) { regArgNum(type) = min(regArgNum(type) + numRegs, maxRegArgNum(type)); } }; #endif // __register_arg_convention__
1
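register_arg_convention.h above documents that allocRegArg hands out a set of contiguous argument registers (an ARM double needs numRegs=2) and that alignReg skips a register to reach an ABI-required boundary. The toy model below shows just those two moves under assumed, simplified rules; RegCursor and its members are invented for this sketch, and ARM's back-filling of skipped float registers, which the header also describes, is deliberately left out.

// Minimal sketch (not the JIT's implementation) of an argument-register
// cursor with an ARM-style alignment rule: a double must start in an
// even-numbered register, so an odd cursor skips one register first.
#include <cstdio>

struct RegCursor
{
    unsigned next    = 0; // next free argument register
    unsigned maxRegs = 4; // e.g. r0-r3 for ARM integer args

    // Align to 'requiredRegAlignment' (1 = none, 2 = double).
    // Returns how many registers were skipped.
    unsigned align(unsigned requiredRegAlignment)
    {
        unsigned skipped = 0;
        while (next % requiredRegAlignment != 0)
        {
            next++;
            skipped++;
        }
        return skipped;
    }

    // Allocate numRegs contiguous registers; returns the first one,
    // or maxRegs when the argument must go to the stack instead.
    unsigned alloc(unsigned numRegs)
    {
        if (next + numRegs > maxRegs)
            return maxRegs;
        unsigned first = next;
        next += numRegs;
        return first;
    }
};

int main()
{
    RegCursor c;
    unsigned r0      = c.alloc(1); // int arg -> r0
    unsigned skipped = c.align(2); // double must be 2-aligned: skips r1
    unsigned d       = c.alloc(2); // double  -> r2,r3
    printf("int in r%u, skipped %u, double in r%u,r%u\n", r0, skipped, d, d + 1);
}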
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/jit/registerarm.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // clang-format off /*****************************************************************************/ /*****************************************************************************/ #ifndef REGDEF #error Must define REGDEF macro before including this file #endif #ifndef REGALIAS #define REGALIAS(alias, realname) #endif /* REGDEF(name, rnum, mask, sname) */ REGDEF(R0, 0, 0x0001, "r0" ) REGDEF(R1, 1, 0x0002, "r1" ) REGDEF(R2, 2, 0x0004, "r2" ) REGDEF(R3, 3, 0x0008, "r3" ) REGDEF(R4, 4, 0x0010, "r4" ) REGDEF(R5, 5, 0x0020, "r5" ) REGDEF(R6, 6, 0x0040, "r6" ) REGDEF(R7, 7, 0x0080, "r7" ) REGDEF(R8, 8, 0x0100, "r8" ) REGDEF(R9, 9, 0x0200, "r9" ) REGDEF(R10, 10, 0x0400, "r10" ) REGDEF(R11, 11, 0x0800, "r11" ) REGDEF(R12, 12, 0x1000, "r12" ) REGDEF(SP, 13, 0x2000, "sp" ) REGDEF(LR, 14, 0x4000, "lr" ) REGDEF(PC, 15, 0x8000, "pc" ) #define FPBASE 16 #define VFPMASK(x) (((__int64)1) << (x+FPBASE)) REGDEF(F0, 0+FPBASE, VFPMASK(0), "f0") REGDEF(F1, 1+FPBASE, VFPMASK(1), "f1") REGDEF(F2, 2+FPBASE, VFPMASK(2), "f2") REGDEF(F3, 3+FPBASE, VFPMASK(3), "f3") REGDEF(F4, 4+FPBASE, VFPMASK(4), "f4") REGDEF(F5, 5+FPBASE, VFPMASK(5), "f5") REGDEF(F6, 6+FPBASE, VFPMASK(6), "f6") REGDEF(F7, 7+FPBASE, VFPMASK(7), "f7") REGDEF(F8, 8+FPBASE, VFPMASK(8), "f8") REGDEF(F9, 9+FPBASE, VFPMASK(9), "f9") REGDEF(F10, 10+FPBASE, VFPMASK(10), "f10") REGDEF(F11, 11+FPBASE, VFPMASK(11), "f11") REGDEF(F12, 12+FPBASE, VFPMASK(12), "f12") REGDEF(F13, 13+FPBASE, VFPMASK(13), "f13") REGDEF(F14, 14+FPBASE, VFPMASK(14), "f14") REGDEF(F15, 15+FPBASE, VFPMASK(15), "f15") REGDEF(F16, 16+FPBASE, VFPMASK(16), "f16") REGDEF(F17, 17+FPBASE, VFPMASK(17), "f17") REGDEF(F18, 18+FPBASE, VFPMASK(18), "f18") REGDEF(F19, 19+FPBASE, VFPMASK(19), "f19") REGDEF(F20, 20+FPBASE, VFPMASK(20), "f20") REGDEF(F21, 21+FPBASE, VFPMASK(21), "f21") REGDEF(F22, 22+FPBASE, VFPMASK(22), "f22") REGDEF(F23, 23+FPBASE, VFPMASK(23), "f23") REGDEF(F24, 24+FPBASE, VFPMASK(24), "f24") REGDEF(F25, 25+FPBASE, VFPMASK(25), "f25") REGDEF(F26, 26+FPBASE, VFPMASK(26), "f26") REGDEF(F27, 27+FPBASE, VFPMASK(27), "f27") REGDEF(F28, 28+FPBASE, VFPMASK(28), "f28") REGDEF(F29, 29+FPBASE, VFPMASK(29), "f29") REGDEF(F30, 30+FPBASE, VFPMASK(30), "f30") REGDEF(F31, 31+FPBASE, VFPMASK(31), "f31") // Allow us to call R11/FP, SP, LR and PC by their register number names REGALIAS(FP, R11) REGALIAS(R13, SP) REGALIAS(R14, LR) REGALIAS(R15, PC) // This must be last! REGDEF(STK, 32+FPBASE, 0x0000, "STK") /*****************************************************************************/ #undef REGDEF #undef REGALIAS /*****************************************************************************/ // clang-format on
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // clang-format off /*****************************************************************************/ /*****************************************************************************/ #ifndef REGDEF #error Must define REGDEF macro before including this file #endif #ifndef REGALIAS #define REGALIAS(alias, realname) #endif /* REGDEF(name, rnum, mask, sname) */ REGDEF(R0, 0, 0x0001, "r0" ) REGDEF(R1, 1, 0x0002, "r1" ) REGDEF(R2, 2, 0x0004, "r2" ) REGDEF(R3, 3, 0x0008, "r3" ) REGDEF(R4, 4, 0x0010, "r4" ) REGDEF(R5, 5, 0x0020, "r5" ) REGDEF(R6, 6, 0x0040, "r6" ) REGDEF(R7, 7, 0x0080, "r7" ) REGDEF(R8, 8, 0x0100, "r8" ) REGDEF(R9, 9, 0x0200, "r9" ) REGDEF(R10, 10, 0x0400, "r10" ) REGDEF(R11, 11, 0x0800, "r11" ) REGDEF(R12, 12, 0x1000, "r12" ) REGDEF(SP, 13, 0x2000, "sp" ) REGDEF(LR, 14, 0x4000, "lr" ) REGDEF(PC, 15, 0x8000, "pc" ) #define FPBASE 16 #define VFPMASK(x) (((__int64)1) << (x+FPBASE)) REGDEF(F0, 0+FPBASE, VFPMASK(0), "f0") REGDEF(F1, 1+FPBASE, VFPMASK(1), "f1") REGDEF(F2, 2+FPBASE, VFPMASK(2), "f2") REGDEF(F3, 3+FPBASE, VFPMASK(3), "f3") REGDEF(F4, 4+FPBASE, VFPMASK(4), "f4") REGDEF(F5, 5+FPBASE, VFPMASK(5), "f5") REGDEF(F6, 6+FPBASE, VFPMASK(6), "f6") REGDEF(F7, 7+FPBASE, VFPMASK(7), "f7") REGDEF(F8, 8+FPBASE, VFPMASK(8), "f8") REGDEF(F9, 9+FPBASE, VFPMASK(9), "f9") REGDEF(F10, 10+FPBASE, VFPMASK(10), "f10") REGDEF(F11, 11+FPBASE, VFPMASK(11), "f11") REGDEF(F12, 12+FPBASE, VFPMASK(12), "f12") REGDEF(F13, 13+FPBASE, VFPMASK(13), "f13") REGDEF(F14, 14+FPBASE, VFPMASK(14), "f14") REGDEF(F15, 15+FPBASE, VFPMASK(15), "f15") REGDEF(F16, 16+FPBASE, VFPMASK(16), "f16") REGDEF(F17, 17+FPBASE, VFPMASK(17), "f17") REGDEF(F18, 18+FPBASE, VFPMASK(18), "f18") REGDEF(F19, 19+FPBASE, VFPMASK(19), "f19") REGDEF(F20, 20+FPBASE, VFPMASK(20), "f20") REGDEF(F21, 21+FPBASE, VFPMASK(21), "f21") REGDEF(F22, 22+FPBASE, VFPMASK(22), "f22") REGDEF(F23, 23+FPBASE, VFPMASK(23), "f23") REGDEF(F24, 24+FPBASE, VFPMASK(24), "f24") REGDEF(F25, 25+FPBASE, VFPMASK(25), "f25") REGDEF(F26, 26+FPBASE, VFPMASK(26), "f26") REGDEF(F27, 27+FPBASE, VFPMASK(27), "f27") REGDEF(F28, 28+FPBASE, VFPMASK(28), "f28") REGDEF(F29, 29+FPBASE, VFPMASK(29), "f29") REGDEF(F30, 30+FPBASE, VFPMASK(30), "f30") REGDEF(F31, 31+FPBASE, VFPMASK(31), "f31") // Allow us to call R11/FP, SP, LR and PC by their register number names REGALIAS(FP, R11) REGALIAS(R13, SP) REGALIAS(R14, LR) REGALIAS(R15, PC) // This must be last! REGDEF(STK, 32+FPBASE, 0x0000, "STK") /*****************************************************************************/ #undef REGDEF #undef REGALIAS /*****************************************************************************/ // clang-format on
-1
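registerarm.h is a classic X-macro table: it deliberately leaves REGDEF (and REGALIAS) for the includer to define, so the one register list can expand into an enum, a mask table, or a display-name table. A minimal self-contained version of the pattern, with a tiny inline table standing in for the real file, might look like this:

// Sketch of the X-macro pattern that files like registerarm.h rely on.
#include <cstdio>

#define REG_TABLE(REGDEF)              \
    REGDEF(R0, 0, 0x0001, "r0")        \
    REGDEF(R1, 1, 0x0002, "r1")        \
    REGDEF(SP, 13, 0x2000, "sp")

// Expansion 1: an enum of register numbers.
#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
enum Registers { REG_TABLE(REGDEF) };
#undef REGDEF

// Expansion 2: an array of display names (the real, dense table can be
// indexed directly by register number; this toy table is sparse).
#define REGDEF(name, rnum, mask, sname) sname,
static const char* regNames[] = { REG_TABLE(REGDEF) };
#undef REGDEF

int main()
{
    printf("REG_SP = %d, name of r%d = %s\n", REG_SP, REG_R1, regNames[REG_R1]);
}

The real header does the same thing through #include: the consumer defines REGDEF, includes registerarm.h to stamp out one expansion, and the header's trailing #undef lines reset the macros for the next use.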
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/inc/getproductversionnumber.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // GetProductVersionNumber.h // // Helper function to retrieve the file version number of a file. // // ====================================================================================== #ifndef __GetProductVersionNumber_h__ #define __GetProductVersionNumber_h__ #include "contract.h" #include "sstring.h" #include "holder.h" #include "ex.h" //--------------------------------------------------------------------------------------- // // Given the full path to an image, return the product version number. // // Arguments: // szFullPath - full path to the image // pdwVersionMS - out parameter; return the most significant 4 bytes of the version number according to // the VS_FIXEDFILEINFO convention // pdwVersionLS - out parameter; return the least significant 4 bytes of the version number according to // the VS_FIXEDFILEINFO convention // // Notes: // Throws on error void inline GetProductVersionNumber(SString &szFullPath, DWORD * pdwVersionMS, DWORD * pdwVersionLS) { WRAPPER_NO_CONTRACT; #ifndef TARGET_UNIX DWORD dwDummy = 0; DWORD dwFileInfoSize = 0; // Get the size of all of the file version information. dwFileInfoSize = GetFileVersionInfoSize(szFullPath, &dwDummy); if (dwFileInfoSize == 0) { ThrowLastError(); } // Create the buffer to store the file information. NewHolder<BYTE> pbFileInfo(new BYTE[dwFileInfoSize]); // Actually retrieve the file version information. if (!GetFileVersionInfo(szFullPath, NULL, dwFileInfoSize, pbFileInfo)) { ThrowLastError(); } // Now retrieve only the relevant version information, which will be returned in a VS_FIXEDFILEINFO. UINT uVersionInfoSize = 0; VS_FIXEDFILEINFO * pVersionInfo = NULL; if (!VerQueryValue(pbFileInfo, W("\\"), reinterpret_cast<LPVOID *>(&pVersionInfo), &uVersionInfoSize)) { ThrowLastError(); } _ASSERTE(uVersionInfoSize == sizeof(VS_FIXEDFILEINFO)); *pdwVersionMS = pVersionInfo->dwProductVersionMS; *pdwVersionLS = pVersionInfo->dwProductVersionLS; #else *pdwVersionMS = 0; *pdwVersionLS = 0; #endif // TARGET_UNIX } #endif // __GetProductVersionNumber_h__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // GetProductVersionNumber.h // // Helper function to retrieve the file version number of a file. // // ====================================================================================== #ifndef __GetProductVersionNumber_h__ #define __GetProductVersionNumber_h__ #include "contract.h" #include "sstring.h" #include "holder.h" #include "ex.h" //--------------------------------------------------------------------------------------- // // Given the full path to an image, return the product version number. // // Arguments: // szFullPath - full path to the image // pdwVersionMS - out parameter; return the most significant 4 bytes of the version number according to // the VS_FIXEDFILEINFO convention // pdwVersionLS - out parameter; return the least significant 4 bytes of the version number according to // the VS_FIXEDFILEINFO convention // // Notes: // Throws on error void inline GetProductVersionNumber(SString &szFullPath, DWORD * pdwVersionMS, DWORD * pdwVersionLS) { WRAPPER_NO_CONTRACT; #ifndef TARGET_UNIX DWORD dwDummy = 0; DWORD dwFileInfoSize = 0; // Get the size of all of the file version information. dwFileInfoSize = GetFileVersionInfoSize(szFullPath, &dwDummy); if (dwFileInfoSize == 0) { ThrowLastError(); } // Create the buffer to store the file information. NewHolder<BYTE> pbFileInfo(new BYTE[dwFileInfoSize]); // Actually retrieve the file version information. if (!GetFileVersionInfo(szFullPath, NULL, dwFileInfoSize, pbFileInfo)) { ThrowLastError(); } // Now retrieve only the relevant version information, which will be returned in a VS_FIXEDFILEINFO. UINT uVersionInfoSize = 0; VS_FIXEDFILEINFO * pVersionInfo = NULL; if (!VerQueryValue(pbFileInfo, W("\\"), reinterpret_cast<LPVOID *>(&pVersionInfo), &uVersionInfoSize)) { ThrowLastError(); } _ASSERTE(uVersionInfoSize == sizeof(VS_FIXEDFILEINFO)); *pdwVersionMS = pVersionInfo->dwProductVersionMS; *pdwVersionLS = pVersionInfo->dwProductVersionLS; #else *pdwVersionMS = 0; *pdwVersionLS = 0; #endif // TARGET_UNIX } #endif // __GetProductVersionNumber_h__
-1
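GetProductVersionNumber above returns its result in the VS_FIXEDFILEINFO convention its comment refers to: each DWORD packs two 16-bit halves, with major/minor in the MS value and build/revision in the LS value. A small sketch of decoding that packing follows; the sample version number is made up for illustration.

// Decoding the VS_FIXEDFILEINFO MS/LS packing: version a.b.c.d is stored
// as MS = (a << 16) | b and LS = (c << 16) | d.
#include <cstdint>
#include <cstdio>

static void PrintProductVersion(uint32_t dwVersionMS, uint32_t dwVersionLS)
{
    unsigned major    = (dwVersionMS >> 16) & 0xFFFF;
    unsigned minor    = dwVersionMS & 0xFFFF;
    unsigned build    = (dwVersionLS >> 16) & 0xFFFF;
    unsigned revision = dwVersionLS & 0xFFFF;
    printf("%u.%u.%u.%u\n", major, minor, build, revision);
}

int main()
{
    // A hypothetical 6.0.22.11601 round-trips like this:
    uint32_t ms = (6u << 16) | 0u;
    uint32_t ls = (22u << 16) | 11601u;
    PrintProductVersion(ms, ls); // prints 6.0.22.11601
}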
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/md/enc/stgio.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // StgIO.h // // // This module handles disk/memory i/o for a generic set of storage solutions, // including: // * File system handle (HFILE) // * IStream // * User supplied memory buffer (non-movable) // // The Read, Write, Seek, ... functions are all directed to the corresponding // method for each type of file, allowing the consumer to use one set of api's. // // File system data can be paged fully into memory in two scenarios: // read: Normal memory mapped file is created to manage paging. // write: A custom paging system provides storage for pages as required. This // data is invalidated when you call Rewrite on the file. // // Transactions and backups are handled in the existing file case only. The // Rewrite function can make a backup of the current contents, and the Restore // function can be used to recover the data into the current scope. The backup // file is flushed to disk (which is slower but safer) after the copy. The // Restore also flushes the recovered changes to disk. Worst case scenario you // get a crash after calling Rewrite but before Restore, in which case you will // have a foo.clb.txn file in the same directory as the source file, foo.clb in // this example. //<REVISIT_TODO> // @FUTURE: issues, // 1. For reading a .clb in an image, it would be great to memory map // only the portion of the file with the .clb in it. //</REVISIT_TODO> //***************************************************************************** #include "stdafx.h" // Standard headers. #include "stgio.h" // Our definitions. #include "corerror.h" #include "posterror.h" #include "pedecoder.h" #include "pedecoder.inl" //********** Types. *********************************************************** #define SMALL_ALLOC_MAP_SIZE (64 * 1024) // 64 kb is the minimum size of virtual // memory you can allocate, so anything // less is a waste of VM resources. #define MIN_WRITE_CACHE_BYTES (16 * 1024) // 16 kb for a write back cache //********** Locals. ********************************************************** HRESULT MapFileError(DWORD error); static void *AllocateMemory(int iSize); static void FreeMemory(void *pbData); inline HRESULT MapFileError(DWORD error) { return (PostError(HRESULT_FROM_WIN32(error))); } // Static to class. int StgIO::m_iPageSize=0; // Size of an OS page. int StgIO::m_iCacheSize=0; // Size for the write cache. //********** Code. ************************************************************ StgIO::StgIO( bool bAutoMap) : // Memory map for read on open? m_bAutoMap(bAutoMap) { CtorInit(); // If the system page size has not been queried, do so now. if (m_iPageSize == 0) { SYSTEM_INFO sInfo; // Some O/S information. // Query the system page size.
GetSystemInfo(&sInfo); m_iPageSize = sInfo.dwPageSize; m_iCacheSize = ((MIN_WRITE_CACHE_BYTES - 1) & ~(m_iPageSize - 1)) + m_iPageSize; } } void StgIO::CtorInit() { m_bWriteThrough = false; m_bRewrite = false; m_bFreeMem = false; m_pIStream = 0; m_hFile = INVALID_HANDLE_VALUE; m_hModule = NULL; m_hMapping = 0; m_pBaseData = 0; m_pData = 0; m_cbData = 0; m_fFlags = 0; m_iType = STGIO_NODATA; m_cbOffset = 0; m_rgBuff = 0; m_cbBuff = 0; m_rgPageMap = 0; m_FileType = FILETYPE_UNKNOWN; m_cRef = 1; m_mtMappedType = MTYPE_NOMAPPING; } StgIO::~StgIO() { if (m_rgBuff) { FreeMemory(m_rgBuff); m_rgBuff = 0; } Close(); } //***************************************************************************** // Open the base file on top of: (a) file, (b) memory buffer, or (c) stream. // If create flag is specified, then this will create a new file with the // name supplied. No data is read from an opened file. You must call // MapFileToMem before doing direct pointer access to the contents. //***************************************************************************** HRESULT StgIO::Open( // Return code. LPCWSTR szName, // Name of the storage. int fFlags, // How to open the file. const void *pbBuff, // Optional buffer for memory. ULONG cbBuff, // Size of buffer. IStream *pIStream, // Stream for input. LPSECURITY_ATTRIBUTES pAttributes) // Security token. { HRESULT hr; // If we were given the storage memory to begin with, then use it. if (pbBuff && cbBuff) { _ASSERTE((fFlags & DBPROP_TMODEF_WRITE) == 0); // Save the memory address and size only. No handles. m_pData = (void *) pbBuff; m_cbData = cbBuff; // All access to data will be by memory provided. if ((fFlags & DBPROP_TMODEF_SHAREDMEM) == DBPROP_TMODEF_SHAREDMEM) { // We're taking ownership of this memory m_pBaseData = m_pData; m_iType = STGIO_SHAREDMEM; } else { m_iType = STGIO_MEM; } goto ErrExit; } // Check for data backed by a stream pointer. else if (pIStream) { // If this is for the non-create case, get the size of existing data. if ((fFlags & DBPROP_TMODEF_CREATE) == 0) { LARGE_INTEGER iMove = { { 0, 0 } }; ULARGE_INTEGER iSize; // Need the size of the data so we can map it into memory. if (FAILED(hr = pIStream->Seek(iMove, STREAM_SEEK_END, &iSize))) return (hr); m_cbData = iSize.u.LowPart; } // Else there is nothing. else m_cbData = 0; // Save an addref'd copy of the stream. m_pIStream = pIStream; m_pIStream->AddRef(); // All access to data will be by memory provided. m_iType = STGIO_STREAM; goto ErrExit; } // If not on memory, we need a file to do a create/open. if (!szName || !*szName) { return (PostError(E_INVALIDARG)); } // Check for create of a new file. else if (fFlags & DBPROP_TMODEF_CREATE) { //<REVISIT_TODO>@future: This could choose to open the file in write through // mode, which would provide better Durability (from ACID props), // but would be much slower.</REVISIT_TODO> // Create the new file, overwriting only if caller allows it. if ((m_hFile = WszCreateFile(szName, GENERIC_READ | GENERIC_WRITE, 0, 0, (fFlags & DBPROP_TMODEF_FAILIFTHERE) ? CREATE_NEW : CREATE_ALWAYS, 0, 0)) == INVALID_HANDLE_VALUE) { return (MapFileError(GetLastError())); } // Data will come from the file. m_iType = STGIO_HFILE; } // For open in read mode, need to open the file on disk. If opening a shared // memory view, it has to be opened already, so no file open. else if ((fFlags & DBPROP_TMODEF_WRITE) == 0) { // We have not opened the file nor loaded it as module _ASSERTE(m_hFile == INVALID_HANDLE_VALUE); _ASSERTE(m_hModule == NULL); // Open the file for read.
Sharing is determined by the caller; it can // allow other readers or be exclusive. DWORD dwFileSharingFlags = FILE_SHARE_DELETE; if (!(fFlags & DBPROP_TMODEF_EXCLUSIVE)) { dwFileSharingFlags |= FILE_SHARE_READ; #if !defined(DACCESS_COMPILE) && !defined(TARGET_UNIX) // PEDecoder is not defined in DAC // We prefer to use LoadLibrary if we can because it will share already loaded images (used for execution) // which saves virtual memory. We only do this if our caller has indicated that this PE file is trusted // and thus it is OK to do LoadLibrary (note that we still only load it as a resource, which mitigates // most of the security risk anyway). if ((fFlags & DBPROP_TMODEF_TRYLOADLIBRARY) != 0) { m_hModule = WszLoadLibraryEx(szName, NULL, LOAD_LIBRARY_AS_IMAGE_RESOURCE); if (m_hModule != NULL) { m_iType = STGIO_HMODULE; m_mtMappedType = MTYPE_IMAGE; // The 2 lowest bits of the handle returned by LoadLibraryEx indicate how the module was loaded m_pBaseData = m_pData = (void *)(((INT_PTR)m_hModule) & ~(INT_PTR)0x3); PEDecoder peDecoder; if (SUCCEEDED(peDecoder.Init( m_pBaseData, false)) && // relocated peDecoder.CheckNTHeaders()) { m_cbData = peDecoder.GetNTHeaders32()->OptionalHeader.SizeOfImage; } else { // PEDecoder failed on loaded library, let's back out all our changes to this object // and fall back to file mapping m_iType = STGIO_NODATA; m_mtMappedType = MTYPE_NOMAPPING; m_pBaseData = m_pData = NULL; FreeLibrary(m_hModule); m_hModule = NULL; } } } #endif //!DACCESS_COMPILE && !TARGET_UNIX } if (m_hModule == NULL) { // We didn't get the loaded module (we either didn't want to or it failed) HandleHolder hFile(WszCreateFile(szName, GENERIC_READ, dwFileSharingFlags, 0, OPEN_EXISTING, 0, 0)); if (hFile == INVALID_HANDLE_VALUE) return (MapFileError(GetLastError())); // Get size of file. m_cbData = ::SetFilePointer(hFile, 0, 0, FILE_END); // Can't read anything from an empty file. if (m_cbData == 0) return (PostError(CLDB_E_NO_DATA)); // Data will come from the file. m_hFile = hFile.Extract(); m_iType = STGIO_HFILE; } } ErrExit: // If we will ever write, then we need the buffer cache. if (fFlags & DBPROP_TMODEF_WRITE) { // Allocate a cache buffer for writing. if ((m_rgBuff = (BYTE *) AllocateMemory(m_iCacheSize)) == NULL) { Close(); return PostError(OutOfMemory()); } m_cbBuff = 0; } // Save flags for later. m_fFlags = fFlags; if ((szName != NULL) && (*szName != 0)) { WCHAR rcExt[_MAX_PATH]; SplitPath(szName, NULL, 0, NULL, 0, NULL, 0, rcExt, _MAX_PATH); if (SString::_wcsicmp(rcExt, W(".obj")) == 0) { m_FileType = FILETYPE_NTOBJ; } else if (SString::_wcsicmp(rcExt, W(".tlb")) == 0) { m_FileType = FILETYPE_TLB; } } // For auto map case, map the view of the file as part of open. if (m_bAutoMap && (m_iType == STGIO_HFILE || m_iType == STGIO_STREAM) && !(fFlags & DBPROP_TMODEF_CREATE)) { void * ptr; ULONG cb; if (FAILED(hr = MapFileToMem(ptr, &cb, pAttributes))) { Close(); return hr; } } return S_OK; } // StgIO::Open //***************************************************************************** // Shut down the file handles and allocated objects. //***************************************************************************** void StgIO::Close() { switch (m_iType) { // Free any allocated memory. case STGIO_SHAREDMEM: if (m_pBaseData != NULL) { CoTaskMemFree(m_pBaseData); m_pBaseData = NULL; break; } FALLTHROUGH; case STGIO_MEM: case STGIO_HFILEMEM: if (m_bFreeMem && m_pBaseData) { FreeMemory(m_pBaseData); m_pBaseData = m_pData = 0; } // Intentional fall through to file case, if we kept handle open.
FALLTHROUGH; case STGIO_HFILE: { // Free the file handle. if (m_hFile != INVALID_HANDLE_VALUE) CloseHandle(m_hFile); // If we allocated space for in memory paging, then free it. } break; case STGIO_HMODULE: { if (m_hModule != NULL) FreeLibrary(m_hModule); m_hModule = NULL; break; } // Free the stream pointer. case STGIO_STREAM: { if (m_pIStream != NULL) m_pIStream->Release(); } break; // Weird to shut down what you didn't open, isn't it? Allow for the // error case where the dtor shuts down as an afterthought. case STGIO_NODATA: default: return; } // Free any page map and base data. FreePageMap(); // Reset state values so we don't get confused. CtorInit(); } //***************************************************************************** // Called to read the data into allocated memory and release the backing store. // Only available on read-only data. //***************************************************************************** HRESULT StgIO::LoadFileToMemory() { HRESULT hr; void *pData; // Allocated buffer for file. ULONG cbData; // Size of the data. ULONG cbRead = 0; // Data actually read. // Make sure it is a read-only file. if (m_fFlags & DBPROP_TMODEF_WRITE) return E_INVALIDARG; // Try to allocate the buffer. cbData = m_cbData; pData = AllocateMemory(cbData); IfNullGo(pData); // Try to read the file into the buffer. IfFailGo(Read(pData, cbData, &cbRead)); if (cbData != cbRead) { _ASSERTE_MSG(FALSE, "Read didn't succeed."); IfFailGo(CLDB_E_FILE_CORRUPT); } // Done with the old data. Close(); // Open with new data. hr = Open(NULL /* szName */, STGIO_READ, pData, cbData, NULL /* IStream* */, NULL /* lpSecurityAttributes */); _ASSERTE(SUCCEEDED(hr)); // should not be a failure code path with open on buffer. // Mark the new memory so that it will be freed later. m_pBaseData = m_pData; m_bFreeMem = true; ErrExit: if (FAILED(hr) && pData) FreeMemory(pData); return hr; } // StgIO::LoadFileToMemory //***************************************************************************** // Read data from the storage source. This will handle all types of backing // storage from mmf, streams, and file handles. No read ahead or MRU // caching is done. //***************************************************************************** HRESULT StgIO::Read( // Return code. void *pbBuff, // Write buffer here. ULONG cbBuff, // How much to read. ULONG *pcbRead) // How much read. { ULONG cbCopy; // For boundary checks. void *pbData; // Data buffer for mem read. HRESULT hr = S_OK; // Validate arguments, don't call if you don't need to. _ASSERTE(pbBuff != 0); _ASSERTE(cbBuff > 0); // Get the data based on type. switch (m_iType) { // For data on file, there are two possibilities: // (1) We have an in memory backing store we should use, or // (2) We just need to read from the file. case STGIO_HFILE: case STGIO_HMODULE: { _ASSERTE((m_hFile != INVALID_HANDLE_VALUE) || (m_hModule != NULL)); // Backing store does its own paging. if (IsBackingStore() || IsMemoryMapped()) { // Force the data into memory. if (FAILED(hr = GetPtrForMem(GetCurrentOffset(), cbBuff, pbData))) goto ErrExit; // Copy it back for the user and save the size. memcpy(pbBuff, pbData, cbBuff); if (pcbRead) *pcbRead = cbBuff; } // If there is no backing store, this is just a read operation. else { _ASSERTE((m_iType == STGIO_HFILE) && (m_hFile != INVALID_HANDLE_VALUE)); _ASSERTE(m_hModule == NULL); ULONG cbTemp = 0; if (!pcbRead) pcbRead = &cbTemp; hr = ReadFromDisk(pbBuff, cbBuff, pcbRead); m_cbOffset += *pcbRead; } } break; // Data in a stream is always just read.
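// (On success the logical offset below is advanced by however many bytes the stream reports were read.)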
case STGIO_STREAM: { _ASSERTE((IStream *) m_pIStream); if (!pcbRead) pcbRead = &cbCopy; *pcbRead = 0; hr = m_pIStream->Read(pbBuff, cbBuff, pcbRead); if (SUCCEEDED(hr)) m_cbOffset += *pcbRead; } break; // Simply copy the data from our buffer. case STGIO_MEM: case STGIO_SHAREDMEM: case STGIO_HFILEMEM: { _ASSERTE(m_pData && m_cbData); // Check for read past end of buffer and adjust. if (GetCurrentOffset() + cbBuff > m_cbData) cbCopy = m_cbData - GetCurrentOffset(); else cbCopy = cbBuff; // Copy the data into the caller's buffer. memcpy(pbBuff, (void *) ((DWORD_PTR)m_pData + GetCurrentOffset()), cbCopy); if (pcbRead) *pcbRead = cbCopy; // Save a logical offset. m_cbOffset += cbCopy; } break; case STGIO_NODATA: default: _ASSERTE(0); break; } ErrExit: return (hr); } //***************************************************************************** // Write to disk. This function will cache up to a page of data in a buffer // and periodically flush it on overflow and explicit request. This makes it // safe to do lots of small writes without too much performance overhead. //***************************************************************************** HRESULT StgIO::Write( // Return code. const void *pbBuff, // Data to write. ULONG cbWrite, // How much data to write. ULONG *pcbWritten) // How much actually got written. { ULONG cbWriteIn=cbWrite; // Track amount written. ULONG cbCopy; HRESULT hr = S_OK; _ASSERTE(m_rgBuff != 0); _ASSERTE(cbWrite); while (cbWrite) { // In the case where the buffer is already huge, write the whole thing // and avoid the cache. if (m_cbBuff == 0 && cbWrite >= (ULONG) m_iPageSize) { if (SUCCEEDED(hr = WriteToDisk(pbBuff, cbWrite, pcbWritten))) m_cbOffset += cbWrite; break; } // Otherwise cache as much as we can and flush. else { // Determine how much data goes into the cache buffer. cbCopy = m_iPageSize - m_cbBuff; cbCopy = min(cbCopy, cbWrite); // Copy the data into the cache and adjust counts. memcpy(&m_rgBuff[m_cbBuff], pbBuff, cbCopy); pbBuff = (void *) ((DWORD_PTR)pbBuff + cbCopy); m_cbBuff += cbCopy; m_cbOffset += cbCopy; cbWrite -= cbCopy; // If there is enough data, then flush it to disk and reset count. if (m_cbBuff >= (ULONG) m_iPageSize) { if (FAILED(hr = FlushCache())) break; } } } // Return value for caller. if (SUCCEEDED(hr) && pcbWritten) *pcbWritten = cbWriteIn; return (hr); } //***************************************************************************** // Moves the file pointer to the new location. This handles the different // types of storage systems. //***************************************************************************** HRESULT StgIO::Seek( // New offset. int lVal, // How much to move. ULONG fMoveType) // Direction, use Win32 FILE_xxxx. { ULONG cbRtn = 0; HRESULT hr = NOERROR; _ASSERTE(fMoveType >= FILE_BEGIN && fMoveType <= FILE_END); // Action taken depends on type of storage. switch (m_iType) { case STGIO_HFILE: { // Use the file system's move. _ASSERTE(m_hFile != INVALID_HANDLE_VALUE); cbRtn = ::SetFilePointer(m_hFile, lVal, 0, fMoveType); // Save the location redundantly. if (cbRtn != 0xffffffff) { // make sure that m_cbOffset will stay within range if (cbRtn > m_cbData || cbRtn < 0) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = cbRtn; } } break; case STGIO_STREAM: { LARGE_INTEGER iMove; ULARGE_INTEGER iNewLoc; // Need a 64-bit int. iMove.QuadPart = lVal; // The move types are named differently, but have the same value.
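// (STREAM_SEEK_SET, STREAM_SEEK_CUR, and STREAM_SEEK_END equal FILE_BEGIN, FILE_CURRENT, and FILE_END respectively, so fMoveType can be passed straight through.)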
if (FAILED(hr = m_pIStream->Seek(iMove, fMoveType, &iNewLoc))) return (hr); // make sure that m_cbOffset will stay within range if (iNewLoc.u.LowPart > m_cbData || iNewLoc.u.LowPart < 0) IfFailGo(STG_E_INVALIDFUNCTION); // Save off only our location. m_cbOffset = iNewLoc.u.LowPart; } break; case STGIO_MEM: case STGIO_SHAREDMEM: case STGIO_HFILEMEM: case STGIO_HMODULE: { // We own the offset, so change our value. switch (fMoveType) { case FILE_BEGIN: // make sure that m_cbOffset will stay within range if ((ULONG) lVal > m_cbData || lVal < 0) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = lVal; break; case FILE_CURRENT: // make sure that m_cbOffset will stay within range if (m_cbOffset + lVal > m_cbData) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = m_cbOffset + lVal; break; case FILE_END: _ASSERTE(lVal < (LONG) m_cbData); // make sure that m_cbOffset will stay within range if (m_cbData + lVal > m_cbData) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = m_cbData + lVal; break; } cbRtn = m_cbOffset; } break; // Weird to seek with no data. case STGIO_NODATA: default: _ASSERTE(0); break; } ErrExit: return hr; } //***************************************************************************** // Retrieves the current offset for the storage being used. This value is // tracked based on Read, Write, and Seek operations. //***************************************************************************** ULONG StgIO::GetCurrentOffset() // Current offset. { return (m_cbOffset); } //***************************************************************************** // Map the file contents to a memory mapped file and return a pointer to the // data. For read/write with a backing store, map the file using an internal // paging system. //***************************************************************************** HRESULT StgIO::MapFileToMem( // Return code. void *&ptr, // Return pointer to file data. ULONG *pcbSize, // Return size of data. LPSECURITY_ATTRIBUTES pAttributes) // Security token. { char rcShared[MAXSHMEM]; // ANSI version of shared name. HRESULT hr = S_OK; // Don't penalize for multiple calls. Also, allow calls for mem type so // callers don't need to do so much checking. if (IsBackingStore() || IsMemoryMapped() || (m_iType == STGIO_MEM) || (m_iType == STGIO_SHAREDMEM) || (m_iType == STGIO_HFILEMEM)) { ptr = m_pData; if (pcbSize) *pcbSize = m_cbData; return (S_OK); } //#CopySmallFiles // Check the size of the data we want to map. If it is small enough, then // simply allocate a chunk of memory from a finer grained heap. This saves // virtual memory space, page table entries, and should reduce overall working set. // Also, open for read/write needs a full backing store. if ((m_cbData <= SMALL_ALLOC_MAP_SIZE) && (SMALL_ALLOC_MAP_SIZE > 0)) { DWORD cbRead = m_cbData; _ASSERTE(m_pData == 0); // Just malloc a chunk of data to use. m_pBaseData = m_pData = AllocateMemory(m_cbData); if (!m_pData) { hr = OutOfMemory(); goto ErrExit; } // Read all of the file contents into this piece of memory. IfFailGo( Seek(0, FILE_BEGIN) ); if (FAILED(hr = Read(m_pData, cbRead, &cbRead))) { FreeMemory(m_pData); m_pData = 0; goto ErrExit; } _ASSERTE(cbRead == m_cbData); // If the file isn't being opened for exclusive mode, then free it. // If it is for exclusive, then we need to keep the handle open so the // file is locked, preventing other readers. Also leave it open if // in read/write mode so we can truncate and rewrite.
if (m_hFile == INVALID_HANDLE_VALUE || ((m_fFlags & DBPROP_TMODEF_EXCLUSIVE) == 0 && (m_fFlags & DBPROP_TMODEF_WRITE) == 0)) { // If there was a handle open, then free it. if (m_hFile != INVALID_HANDLE_VALUE) { VERIFY(CloseHandle(m_hFile)); m_hFile = INVALID_HANDLE_VALUE; } // Free the stream pointer. else if (m_pIStream != 0) { m_pIStream->Release(); m_pIStream = 0; } // Switch the type to memory only access. m_iType = STGIO_MEM; } else m_iType = STGIO_HFILEMEM; // Free the memory when we shut down. m_bFreeMem = true; } // Finally, a real mapping file must be created. else { // Now we will map, so better have it right. _ASSERTE(m_hFile != INVALID_HANDLE_VALUE || m_iType == STGIO_STREAM); _ASSERTE(m_rgPageMap == 0); // For read mode, use a memory mapped file since the size will never // change for the life of the handle. if ((m_fFlags & DBPROP_TMODEF_WRITE) == 0 && m_iType != STGIO_STREAM) { // Create a mapping object for the file. _ASSERTE(m_hMapping == 0); DWORD dwProtectionFlags = PAGE_READONLY; if ((m_hMapping = WszCreateFileMapping(m_hFile, pAttributes, dwProtectionFlags, 0, 0, nullptr)) == 0) { return (MapFileError(GetLastError())); } m_mtMappedType = MTYPE_FLAT; // Check to see if the memory already exists, in which case we have // no guarantees it is the right piece of data. if (GetLastError() == ERROR_ALREADY_EXISTS) { hr = PostError(CLDB_E_SMDUPLICATE, rcShared); goto ErrExit; } // Now map the file into memory so we can read from pointer access. // <REVISIT_TODO>Note: Added a check for IsBadReadPtr per the Services team which // indicates that under some conditions this API can give you back // a totally bogus pointer.</REVISIT_TODO> if ((m_pBaseData = m_pData = MapViewOfFile(m_hMapping, FILE_MAP_READ, 0, 0, 0)) == 0) { hr = MapFileError(GetLastError()); if (SUCCEEDED(hr)) { _ASSERTE_MSG(FALSE, "Error code doesn't indicate error."); hr = PostError(CLDB_E_FILE_CORRUPT); } // In case we got back a bogus pointer. m_pBaseData = m_pData = NULL; goto ErrExit; } } // In write mode, we need the hybrid combination of being able to back up // the data in memory via cache, but then later rewrite the contents and // throw away our cached copy. Memory mapped files are not good for this // case due to poor write characteristics. else { ULONG iMaxSize; // How much memory required for file. // Figure out how many pages we'll require, round up actual data // size to page size. iMaxSize = (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize); // Check integer overflow in previous statement if (iMaxSize < m_cbData) { IfFailGo(PostError(COR_E_OVERFLOW)); } // Allocate a bit vector to track loaded pages. if ((m_rgPageMap = new (nothrow) BYTE[iMaxSize / m_iPageSize]) == 0) return (PostError(OutOfMemory())); memset(m_rgPageMap, 0, sizeof(BYTE) * (iMaxSize / m_iPageSize)); // Allocate space for the file contents. if ((m_pBaseData = m_pData = ::ClrVirtualAlloc(0, iMaxSize, MEM_RESERVE, PAGE_NOACCESS)) == 0) { hr = PostError(OutOfMemory()); goto ErrExit; } } } // Reset any changes made by mapping. IfFailGo( Seek(0, FILE_BEGIN) ); ErrExit: // Check for errors and clean up. if (FAILED(hr)) { if (m_hMapping) CloseHandle(m_hMapping); m_hMapping = 0; m_pBaseData = m_pData = 0; m_cbData = 0; } ptr = m_pData; if (pcbSize) *pcbSize = m_cbData; return (hr); } //***************************************************************************** // Free the mapping object for shared memory but keep the rest of the internal // state intact. 
//***************************************************************************** HRESULT StgIO::ReleaseMappingObject() // Return code. { // Check type first. if (m_iType != STGIO_SHAREDMEM) { _ASSERTE(FALSE); return S_OK; } // Must have an allocated handle. _ASSERTE(m_hMapping != 0); // Freeing the mapping object doesn't do any good if you still have the file. _ASSERTE(m_hFile == INVALID_HANDLE_VALUE); // Unmap the memory we allocated before freeing the handle. But keep the // memory address intact. if (m_pData) VERIFY(UnmapViewOfFile(m_pData)); // Free the handle. if (m_hMapping != 0) { VERIFY(CloseHandle(m_hMapping)); m_hMapping = 0; } return S_OK; } //***************************************************************************** // Resets the logical base address and size to the value given. This is for // cases like finding a section embedded in another format, like the .clb inside // of an image. GetPtrForMem, Read, and Seek will then behave as though only // data from pbStart to cbSize is valid. //***************************************************************************** HRESULT StgIO::SetBaseRange( // Return code. void *pbStart, // Start of file data. ULONG cbSize) // How big is the range. { if (m_iType == STGIO_SHAREDMEM) { // The base range must be inside of the current range. _ASSERTE((m_pBaseData != NULL) && (m_cbData != 0)); _ASSERTE(((LONG_PTR) pbStart >= (LONG_PTR) m_pBaseData)); _ASSERTE(((LONG_PTR) pbStart + cbSize <= (LONG_PTR) m_pBaseData + m_cbData)); } // Save the base range per user request. m_pData = pbStart; m_cbData = cbSize; return S_OK; } //***************************************************************************** // Caller wants a pointer to a chunk of the file. This function will make sure // that the memory for that chunk has been committed and will load from the // file if required. This algorithm attempts to load no more data from disk // than is necessary. It walks the required pages from lowest to highest, // and for each block of unloaded pages, the memory is committed and the data // is read from disk. If all pages are unloaded, all of them are loaded at // once to speed throughput from disk. //***************************************************************************** HRESULT StgIO::GetPtrForMem( // Return code. ULONG cbStart, // Where to start getting memory. ULONG cbSize, // How much data. void *&ptr) // Return pointer to memory here. { int iFirst, iLast; // First and last page required. ULONG iOffset, iSize; // For committing ranges of memory. int i, j; // Loop control. HRESULT hr; // We need either memory (mmf or user supplied) or a backing store to // return a pointer. Call Read if you don't have these. if (!IsBackingStore() && m_pData == 0) return (PostError(BadError(E_UNEXPECTED))); // Validate the caller isn't asking for a data value out of range. if (!(ClrSafeInt<ULONG>::addition(cbStart, cbSize, iOffset) && (iOffset <= m_cbData))) return (PostError(E_INVALIDARG)); // This code will check for pages that need to be paged from disk in // order for us to return a pointer to that memory. if (IsBackingStore()) { // Backing store is bogus when in rewrite mode. if (m_bRewrite) return (PostError(BadError(E_UNEXPECTED))); // Must have the page map to continue. _ASSERTE(m_rgPageMap && m_iPageSize && m_pData); // Figure out the first and last page that are required for commit. iFirst = cbStart / m_iPageSize; iLast = (cbStart + cbSize - 1) / m_iPageSize; // Avoid confusion. ptr = 0; // Do a smart load of every page required. 
Do not reload pages that have // already been brought in from disk. //<REVISIT_TODO>@FUTURE: add an optimization so that when all pages have been faulted, we no // longer do a page-by-page search.</REVISIT_TODO> for (i=iFirst; i<=iLast; ) { // Find the first page that hasn't already been loaded. while (GetBit(m_rgPageMap, i) && i<=iLast) ++i; if (i > iLast) break; // Offset for first thing to load. iOffset = i * m_iPageSize; iSize = 0; // See how many in a row have not been loaded. for (j=i; i<=iLast && !GetBit(m_rgPageMap, i); i++) { // Safe: iSize += m_iPageSize; if (!(ClrSafeInt<ULONG>::addition(iSize, m_iPageSize, iSize))) { return PostError(E_INVALIDARG); } } // First commit the memory for this part of the file. if (::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset), iSize, MEM_COMMIT, PAGE_READWRITE) == 0) return (PostError(OutOfMemory())); // Now load that portion of the file from disk. if (FAILED(hr = Seek(iOffset, FILE_BEGIN)) || FAILED(hr = ReadFromDisk((void *) ((DWORD_PTR) m_pData + iOffset), iSize, 0))) { return (hr); } // Change the memory to read only to avoid any modifications. Any faults // that occur indicate a bug whereby the engine is trying to write to // protected memory. _ASSERTE(::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset), iSize, MEM_COMMIT, PAGE_READONLY) != 0); // Record each new loaded page. for (; j<i; j++) SetBit(m_rgPageMap, j, true); } // Everything was brought into memory, so now return pointer to caller. ptr = (void *) ((DWORD_PTR) m_pData + cbStart); } // Memory version or memory mapped file work the same way. else if (IsMemoryMapped() || (m_iType == STGIO_MEM) || (m_iType == STGIO_SHAREDMEM) || (m_iType == STGIO_HFILEMEM)) { if (!(cbStart <= m_cbData)) return (PostError(E_INVALIDARG)); ptr = (void *) ((DWORD_PTR) m_pData + cbStart); } // What's left?! Add some defense. else { _ASSERTE(0); ptr = 0; return (PostError(BadError(E_UNEXPECTED))); } return (S_OK); } //***************************************************************************** // For cached writes, flush the cache to the data store. //***************************************************************************** HRESULT StgIO::FlushCache() { ULONG cbWritten; HRESULT hr; if (m_cbBuff) { if (FAILED(hr = WriteToDisk(m_rgBuff, m_cbBuff, &cbWritten))) return (hr); m_cbBuff = 0; } return (S_OK); } //***************************************************************************** // Tells the file system to flush any cached data it may have. This is // expensive, but if successful guarantees you won't lose writes short of // a disk failure. //***************************************************************************** HRESULT StgIO::FlushFileBuffers() { _ASSERTE(!IsReadOnly()); if (m_hFile != INVALID_HANDLE_VALUE) { if (::FlushFileBuffers(m_hFile)) return (S_OK); else return (MapFileError(GetLastError())); } return (S_OK); } //***************************************************************************** // Called after a successful rewrite of an existing file. The in memory // backing store is no longer valid because all new data is in memory and // on disk. This is essentially the same state as created, so free up some // working set and remember this state. //***************************************************************************** HRESULT StgIO::ResetBackingStore() // Return code. { // Don't be calling this function for read only data. _ASSERTE(!IsReadOnly()); // Free up any backing store data we no longer need now that everything // is in memory.
FreePageMap(); return (S_OK); } // // Private. // //***************************************************************************** // This version will force the data in cache out to disk for real. The code // can handle the different types of storage we might be sitting on based on // the open type. //***************************************************************************** HRESULT StgIO::WriteToDisk( // Return code. const void *pbBuff, // Buffer to write. ULONG cbWrite, // How much. ULONG *pcbWritten) // Return how much written. { ULONG cbWritten; // Buffer for write funcs. HRESULT hr = S_OK; // Pretty obvious. _ASSERTE(!IsReadOnly()); // Always need a buffer to write this data to. if (!pcbWritten) pcbWritten = &cbWritten; // Action taken depends on type of storage. switch (m_iType) { case STGIO_HFILE: case STGIO_HFILEMEM: { // Use the file system's write. _ASSERTE(m_hFile != INVALID_HANDLE_VALUE); // Do the write to disk. if (!::WriteFile(m_hFile, pbBuff, cbWrite, pcbWritten, 0)) hr = MapFileError(GetLastError()); } break; // Write to the stream. case STGIO_STREAM: { // Delegate write to stream code. hr = m_pIStream->Write(pbBuff, cbWrite, pcbWritten); } break; // We cannot write to fixed read/only memory or LoadLibrary module. case STGIO_HMODULE: case STGIO_MEM: case STGIO_SHAREDMEM: _ASSERTE(0); hr = BadError(E_UNEXPECTED); break; // Weird to write with no data. case STGIO_NODATA: default: _ASSERTE(0); break; } return (hr); } //***************************************************************************** // This version only reads from disk. //***************************************************************************** HRESULT StgIO::ReadFromDisk( // Return code. void *pbBuff, // Write buffer here. ULONG cbBuff, // How much to read. ULONG *pcbRead) // How much read. { ULONG cbRead; _ASSERTE(m_iType == STGIO_HFILE || m_iType == STGIO_STREAM); // Need to have a buffer. if (!pcbRead) pcbRead = &cbRead; // Read only from file to avoid recursive logic. if (m_iType == STGIO_HFILE || m_iType == STGIO_HFILEMEM) { if (::ReadFile(m_hFile, pbBuff, cbBuff, pcbRead, 0)) return (S_OK); return (MapFileError(GetLastError())); } // Read directly from stream. else { return (m_pIStream->Read(pbBuff, cbBuff, pcbRead)); } } //***************************************************************************** // Copy the contents of the file for this storage to the target path. //***************************************************************************** HRESULT StgIO::CopyFileInternal( // Return code. LPCWSTR szTo, // Target save path for file. int bFailIfThere, // true to fail if target exists. int bWriteThrough) // Should copy be written through OS cache. { DWORD iCurrent; // Save original location. DWORD cbRead; // Byte count for buffer. DWORD cbWrite; // Check write of bytes. const DWORD cbBuff = 4096; // Size of buffer for copy (in bytes). BYTE *pBuff = (BYTE*)alloca(cbBuff); // Buffer for copy. HANDLE hFile; // Target file. HRESULT hr = S_OK; // Create target file. if ((hFile = ::WszCreateFile(szTo, GENERIC_WRITE, 0, 0, (bFailIfThere) ? CREATE_NEW : CREATE_ALWAYS, (bWriteThrough) ? FILE_FLAG_WRITE_THROUGH : 0, 0)) == INVALID_HANDLE_VALUE) { return (MapFileError(GetLastError())); } // Save current location and reset it later. iCurrent = ::SetFilePointer(m_hFile, 0, 0, FILE_CURRENT); ::SetFilePointer(m_hFile, 0, 0, FILE_BEGIN); // Copy while there are bytes.
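// (ReadFile reports success with zero bytes read at end of file, which ends the loop.)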
while (::ReadFile(m_hFile, pBuff, cbBuff, &cbRead, 0) && cbRead) { if (!::WriteFile(hFile, pBuff, cbRead, &cbWrite, 0) || cbWrite != cbRead) { hr = STG_E_WRITEFAULT; break; } } // Reset file offset. ::SetFilePointer(m_hFile, iCurrent, 0, FILE_BEGIN); // Close target. if (!bWriteThrough) VERIFY(::FlushFileBuffers(hFile)); ::CloseHandle(hFile); return (hr); } //***************************************************************************** // Free the data used for backing store from disk in read/write scenario. //***************************************************************************** void StgIO::FreePageMap() { // If a small file was allocated, then free that memory. if (m_bFreeMem && m_pBaseData) FreeMemory(m_pBaseData); // For mmf, close handles and free resources. else if (m_hMapping && m_pBaseData) { VERIFY(UnmapViewOfFile(m_pBaseData)); VERIFY(CloseHandle(m_hMapping)); } // For our own system, free memory. else if (m_rgPageMap && m_pBaseData) { delete [] m_rgPageMap; m_rgPageMap = 0; VERIFY(::ClrVirtualFree(m_pBaseData, (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize), MEM_DECOMMIT)); VERIFY(::ClrVirtualFree(m_pBaseData, 0, MEM_RELEASE)); m_pBaseData = 0; m_cbData = 0; } m_pBaseData = 0; m_hMapping = 0; m_cbData = 0; } //***************************************************************************** // Check the given pointer and ensure it is aligned correctly. Return true // if it is aligned, false if it is not. //***************************************************************************** int StgIO::IsAlignedPtr(ULONG_PTR Value, int iAlignment) { HRESULT hr; void *ptrStart = NULL; if ((m_iType == STGIO_STREAM) || (m_iType == STGIO_SHAREDMEM) || (m_iType == STGIO_MEM)) { return ((Value - (ULONG_PTR) m_pData) % iAlignment == 0); } else { hr = GetPtrForMem(0, 1, ptrStart); _ASSERTE(hr == S_OK && "GetPtrForMem failed"); _ASSERTE(Value > (ULONG_PTR) ptrStart); return (((Value - (ULONG_PTR) ptrStart) % iAlignment) == 0); } } // int StgIO::IsAlignedPtr() //***************************************************************************** // These helper functions are used to allocate fairly large pieces of memory, // more than should be taken from the runtime heap, but less than would require // virtual memory overhead. //***************************************************************************** // #define _TRACE_MEM_ 1 void *AllocateMemory(int iSize) { void * ptr; ptr = new (nothrow) BYTE[iSize]; #if defined(_DEBUG) && defined(_TRACE_MEM_) static int i=0; DbgWriteEx(W("AllocateMemory: (%d) 0x%08x, size %d\n"), ++i, ptr, iSize); #endif return (ptr); } void FreeMemory(void *pbData) { #if defined(_DEBUG) && defined(_TRACE_MEM_) static int i=0; DbgWriteEx(W("FreeMemory: (%d) 0x%08x\n"), ++i, pbData); #endif _ASSERTE(pbData); delete [] (BYTE *) pbData; }
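//*****************************************************************************
// Illustrative usage sketch (excluded from the build). This is a hedged,
// minimal example, not a sanctioned sample: it assumes the DBPROP_TMODEF_READ
// flag and a Release() method are declared in stgio.h alongside the flags and
// ref count used above, and error handling is trimmed. It shows the typical
// read-only sequence: Open, MapFileToMem for pointer access, then release.
//*****************************************************************************
#if 0
static HRESULT InspectClbFile(LPCWSTR szFile)   // hypothetical helper name
{
    HRESULT hr;
    StgIO *pStgIO = new (nothrow) StgIO(true);  // memory map for read on open
    if (pStgIO == NULL)
        return PostError(OutOfMemory());

    // Open read-only; small files are copied onto the heap, larger ones are
    // memory mapped (see #CopySmallFiles above).
    if (SUCCEEDED(hr = pStgIO->Open(szFile, DBPROP_TMODEF_READ,
                                    NULL, 0,    // no caller supplied buffer
                                    NULL,       // no IStream
                                    NULL)))     // no security attributes
    {
        void *pbData = NULL;
        ULONG cbData = 0;

        // A no-op if Open already mapped the file; afterwards the whole
        // range [pbData, pbData + cbData) is readable in place.
        if (SUCCEEDED(hr = pStgIO->MapFileToMem(pbData, &cbData, NULL)))
        {
            // ... inspect cbData bytes at pbData here ...
        }
    }
    pStgIO->Release();
    return hr;
}
#endif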
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // StgIO.h // // // This module handles disk/memory i/o for a generic set of storage solutions, // including: // * File system handle (HFILE) // * IStream // * User supplied memory buffer (non-movable) // // The Read, Write, Seek, ... functions are all directed to the corresponding // method for each type of file, allowing the consumer to use one set of api's. // // File system data can be paged fully into memory in two scenarios: // read: Normal memory mapped file is created to manage paging. // write: A custom paging system provides storage for pages as required. This // data is invalidated when you call Rewrite on the file. // // Transactions and backups are handled in the existing file case only. The // Rewrite function can make a backup of the current contents, and the Restore // function can be used to recover the data into the current scope. The backup // file is flushed to disk (which is slower but safer) after the copy. The // Restore also flushed the recovered changes to disk. Worst case scenario you // get a crash after calling Rewrite but before Restore, in which case you will // have a foo.clb.txn file in the same directory as the source file, foo.clb in // this example. //<REVISIT_TODO> // @FUTURE: issues, // 1. For reading a .clb in an image, it would be great to memory map // only the portion of the file with the .clb in it. //</REVISIT_TODO> //***************************************************************************** #include "stdafx.h" // Standard headers. #include "stgio.h" // Our definitions. #include "corerror.h" #include "posterror.h" #include "pedecoder.h" #include "pedecoder.inl" //********** Types. *********************************************************** #define SMALL_ALLOC_MAP_SIZE (64 * 1024) // 64 kb is the minimum size of virtual // memory you can allocate, so anything // less is a waste of VM resources. #define MIN_WRITE_CACHE_BYTES (16 * 1024) // 16 kb for a write back cache //********** Locals. ********************************************************** HRESULT MapFileError(DWORD error); static void *AllocateMemory(int iSize); static void FreeMemory(void *pbData); inline HRESULT MapFileError(DWORD error) { return (PostError(HRESULT_FROM_WIN32(error))); } // Static to class. int StgIO::m_iPageSize=0; // Size of an OS page. int StgIO::m_iCacheSize=0; // Size for the write cache. //********** Code. ************************************************************ StgIO::StgIO( bool bAutoMap) : // Memory map for read on open? m_bAutoMap(bAutoMap) { CtorInit(); // If the system page size has not been queried, do so now. if (m_iPageSize == 0) { SYSTEM_INFO sInfo; // Some O/S information. // Query the system page size. 
GetSystemInfo(&sInfo); m_iPageSize = sInfo.dwPageSize; m_iCacheSize = ((MIN_WRITE_CACHE_BYTES - 1) & ~(m_iPageSize - 1)) + m_iPageSize; } } void StgIO::CtorInit() { m_bWriteThrough = false; m_bRewrite = false; m_bFreeMem = false; m_pIStream = 0; m_hFile = INVALID_HANDLE_VALUE; m_hModule = NULL; m_hMapping = 0; m_pBaseData = 0; m_pData = 0; m_cbData = 0; m_fFlags = 0; m_iType = STGIO_NODATA; m_cbOffset = 0; m_rgBuff = 0; m_cbBuff = 0; m_rgPageMap = 0; m_FileType = FILETYPE_UNKNOWN; m_cRef = 1; m_mtMappedType = MTYPE_NOMAPPING; } StgIO::~StgIO() { if (m_rgBuff) { FreeMemory(m_rgBuff); m_rgBuff = 0; } Close(); } //***************************************************************************** // Open the base file on top of: (a) file, (b) memory buffer, or (c) stream. // If create flag is specified, then this will create a new file with the // name supplied. No data is read from an opened file. You must call // MapFileToMem before doing direct pointer access to the contents. //***************************************************************************** HRESULT StgIO::Open( // Return code. LPCWSTR szName, // Name of the storage. int fFlags, // How to open the file. const void *pbBuff, // Optional buffer for memory. ULONG cbBuff, // Size of buffer. IStream *pIStream, // Stream for input. LPSECURITY_ATTRIBUTES pAttributes) // Security token. { HRESULT hr; // If we were given the storage memory to begin with, then use it. if (pbBuff && cbBuff) { _ASSERTE((fFlags & DBPROP_TMODEF_WRITE) == 0); // Save the memory address and size only. No handles. m_pData = (void *) pbBuff; m_cbData = cbBuff; // All access to data will be by memory provided. if ((fFlags & DBPROP_TMODEF_SHAREDMEM) == DBPROP_TMODEF_SHAREDMEM) { // We're taking ownership of this memory m_pBaseData = m_pData; m_iType = STGIO_SHAREDMEM; } else { m_iType = STGIO_MEM; } goto ErrExit; } // Check for data backed by a stream pointer. else if (pIStream) { // If this is for the non-create case, get the size of existing data. if ((fFlags & DBPROP_TMODEF_CREATE) == 0) { LARGE_INTEGER iMove = { { 0, 0 } }; ULARGE_INTEGER iSize; // Need the size of the data so we can map it into memory. if (FAILED(hr = pIStream->Seek(iMove, STREAM_SEEK_END, &iSize))) return (hr); m_cbData = iSize.u.LowPart; } // Else there is nothing. else m_cbData = 0; // Save an addref'd copy of the stream. m_pIStream = pIStream; m_pIStream->AddRef(); // All access to data will be by memory provided. m_iType = STGIO_STREAM; goto ErrExit; } // If not on memory, we need a file to do a create/open. if (!szName || !*szName) { return (PostError(E_INVALIDARG)); } // Check for create of a new file. else if (fFlags & DBPROP_TMODEF_CREATE) { //<REVISIT_TODO>@future: This could chose to open the file in write through // mode, which would provide better Duribility (from ACID props), // but would be much slower.</REVISIT_TODO> // Create the new file, overwriting only if caller allows it. if ((m_hFile = WszCreateFile(szName, GENERIC_READ | GENERIC_WRITE, 0, 0, (fFlags & DBPROP_TMODEF_FAILIFTHERE) ? CREATE_NEW : CREATE_ALWAYS, 0, 0)) == INVALID_HANDLE_VALUE) { return (MapFileError(GetLastError())); } // Data will come from the file. m_iType = STGIO_HFILE; } // For open in read mode, need to open the file on disk. If opening a shared // memory view, it has to be opened already, so no file open. else if ((fFlags & DBPROP_TMODEF_WRITE) == 0) { // We have not opened the file nor loaded it as module _ASSERTE(m_hFile == INVALID_HANDLE_VALUE); _ASSERTE(m_hModule == NULL); // Open the file for read. 
Sharing is determined by caller, it can // allow other readers or be exclusive. DWORD dwFileSharingFlags = FILE_SHARE_DELETE; if (!(fFlags & DBPROP_TMODEF_EXCLUSIVE)) { dwFileSharingFlags |= FILE_SHARE_READ; #if !defined(DACCESS_COMPILE) && !defined(TARGET_UNIX) // PEDecoder is not defined in DAC // We prefer to use LoadLibrary if we can because it will share already loaded images (used for execution) // which saves virtual memory. We only do this if our caller has indicated that this PE file is trusted // and thus it is OK to do LoadLibrary (note that we still only load it as a resource, which mitigates // most of the security risk anyway). if ((fFlags & DBPROP_TMODEF_TRYLOADLIBRARY) != 0) { m_hModule = WszLoadLibraryEx(szName, NULL, LOAD_LIBRARY_AS_IMAGE_RESOURCE); if (m_hModule != NULL) { m_iType = STGIO_HMODULE; m_mtMappedType = MTYPE_IMAGE; // LoadLibraryEx returns 2 lowest bits indicating how the module was loaded m_pBaseData = m_pData = (void *)(((INT_PTR)m_hModule) & ~(INT_PTR)0x3); PEDecoder peDecoder; if (SUCCEEDED(peDecoder.Init( m_pBaseData, false)) && // relocated peDecoder.CheckNTHeaders()) { m_cbData = peDecoder.GetNTHeaders32()->OptionalHeader.SizeOfImage; } else { // PEDecoder failed on loaded library, let's backout all our changes to this object // and fall back to file mapping m_iType = STGIO_NODATA; m_mtMappedType = MTYPE_NOMAPPING; m_pBaseData = m_pData = NULL; FreeLibrary(m_hModule); m_hModule = NULL; } } } #endif //!DACCESS_COMPILE && !TARGET_UNIX } if (m_hModule == NULL) { // We didn't get the loaded module (we either didn't want to or it failed) HandleHolder hFile(WszCreateFile(szName, GENERIC_READ, dwFileSharingFlags, 0, OPEN_EXISTING, 0, 0)); if (hFile == INVALID_HANDLE_VALUE) return (MapFileError(GetLastError())); // Get size of file. m_cbData = ::SetFilePointer(hFile, 0, 0, FILE_END); // Can't read anything from an empty file. if (m_cbData == 0) return (PostError(CLDB_E_NO_DATA)); // Data will come from the file. m_hFile = hFile.Extract(); m_iType = STGIO_HFILE; } } ErrExit: // If we will ever write, then we need the buffer cache. if (fFlags & DBPROP_TMODEF_WRITE) { // Allocate a cache buffer for writing. if ((m_rgBuff = (BYTE *) AllocateMemory(m_iCacheSize)) == NULL) { Close(); return PostError(OutOfMemory()); } m_cbBuff = 0; } // Save flags for later. m_fFlags = fFlags; if ((szName != NULL) && (*szName != 0)) { WCHAR rcExt[_MAX_PATH]; SplitPath(szName, NULL, 0, NULL, 0, NULL, 0, rcExt, _MAX_PATH); if (SString::_wcsicmp(rcExt, W(".obj")) == 0) { m_FileType = FILETYPE_NTOBJ; } else if (SString::_wcsicmp(rcExt, W(".tlb")) == 0) { m_FileType = FILETYPE_TLB; } } // For auto map case, map the view of the file as part of open. if (m_bAutoMap && (m_iType == STGIO_HFILE || m_iType == STGIO_STREAM) && !(fFlags & DBPROP_TMODEF_CREATE)) { void * ptr; ULONG cb; if (FAILED(hr = MapFileToMem(ptr, &cb, pAttributes))) { Close(); return hr; } } return S_OK; } // StgIO::Open //***************************************************************************** // Shut down the file handles and allocated objects. //***************************************************************************** void StgIO::Close() { switch (m_iType) { // Free any allocated memory. case STGIO_SHAREDMEM: if (m_pBaseData != NULL) { CoTaskMemFree(m_pBaseData); m_pBaseData = NULL; break; } FALLTHROUGH; case STGIO_MEM: case STGIO_HFILEMEM: if (m_bFreeMem && m_pBaseData) { FreeMemory(m_pBaseData); m_pBaseData = m_pData = 0; } // Intentional fall through to file case, if we kept handle open. 
FALLTHROUGH; case STGIO_HFILE: { // Free the file handle. if (m_hFile != INVALID_HANDLE_VALUE) CloseHandle(m_hFile); // If we allocated space for in memory paging, then free it. } break; case STGIO_HMODULE: { if (m_hModule != NULL) FreeLibrary(m_hModule); m_hModule = NULL; break; } // Free the stream pointer. case STGIO_STREAM: { if (m_pIStream != NULL) m_pIStream->Release(); } break; // Weird to shut down what you didn't open, isn't it? Allow for // error case where dtor shuts down as an afterthought. case STGIO_NODATA: default: return; } // Free any page map and base data. FreePageMap(); // Reset state values so we don't get confused. CtorInit(); } //***************************************************************************** // Called to read the data into allocated memory and release the backing store. // Only available on read-only data. //***************************************************************************** HRESULT StgIO::LoadFileToMemory() { HRESULT hr; void *pData; // Allocated buffer for file. ULONG cbData; // Size of the data. ULONG cbRead = 0; // Data actually read. // Make sure it is a read-only file. if (m_fFlags & DBPROP_TMODEF_WRITE) return E_INVALIDARG; // Try to allocate the buffer. cbData = m_cbData; pData = AllocateMemory(cbData); IfNullGo(pData); // Try to read the file into the buffer. IfFailGo(Read(pData, cbData, &cbRead)); if (cbData != cbRead) { _ASSERTE_MSG(FALSE, "Read didn't succeed."); IfFailGo(CLDB_E_FILE_CORRUPT); } // Done with the old data. Close(); // Open with new data. hr = Open(NULL /* szName */, STGIO_READ, pData, cbData, NULL /* IStream* */, NULL /* lpSecurityAttributes */); _ASSERTE(SUCCEEDED(hr)); // should not be a failure code path with open on buffer. // Mark the new memory so that it will be freed later. m_pBaseData = m_pData; m_bFreeMem = true; ErrExit: if (FAILED(hr) && pData) FreeMemory(pData); return hr; } // StgIO::LoadFileToMemory //***************************************************************************** // Read data from the storage source. This will handle all types of backing // storage from mmf, streams, and file handles. No read ahead or MRU // caching is done. //***************************************************************************** HRESULT StgIO::Read( // Return code. void *pbBuff, // Write buffer here. ULONG cbBuff, // How much to read. ULONG *pcbRead) // How much read. { ULONG cbCopy; // For boundary checks. void *pbData; // Data buffer for mem read. HRESULT hr = S_OK; // Validate arguments, don't call if you don't need to. _ASSERTE(pbBuff != 0); _ASSERTE(cbBuff > 0); // Get the data based on type. switch (m_iType) { // For data on file, there are two possiblities: // (1) We have an in memory backing store we should use, or // (2) We just need to read from the file. case STGIO_HFILE: case STGIO_HMODULE: { _ASSERTE((m_hFile != INVALID_HANDLE_VALUE) || (m_hModule != NULL)); // Backing store does its own paging. if (IsBackingStore() || IsMemoryMapped()) { // Force the data into memory. if (FAILED(hr = GetPtrForMem(GetCurrentOffset(), cbBuff, pbData))) goto ErrExit; // Copy it back for the user and save the size. memcpy(pbBuff, pbData, cbBuff); if (pcbRead) *pcbRead = cbBuff; } // If there is no backing store, this is just a read operation. else { _ASSERTE((m_iType == STGIO_HFILE) && (m_hFile != INVALID_HANDLE_VALUE)); _ASSERTE(m_hModule == NULL); ULONG cbTemp = 0; if (!pcbRead) pcbRead = &cbTemp; hr = ReadFromDisk(pbBuff, cbBuff, pcbRead); m_cbOffset += *pcbRead; } } break; // Data in a stream is always just read. 
case STGIO_STREAM: { _ASSERTE((IStream *) m_pIStream); if (!pcbRead) pcbRead = &cbCopy; *pcbRead = 0; hr = m_pIStream->Read(pbBuff, cbBuff, pcbRead); if (SUCCEEDED(hr)) m_cbOffset += *pcbRead; } break; // Simply copy the data from our data. case STGIO_MEM: case STGIO_SHAREDMEM: case STGIO_HFILEMEM: { _ASSERTE(m_pData && m_cbData); // Check for read past end of buffer and adjust. if (GetCurrentOffset() + cbBuff > m_cbData) cbCopy = m_cbData - GetCurrentOffset(); else cbCopy = cbBuff; // Copy the data into the callers buffer. memcpy(pbBuff, (void *) ((DWORD_PTR)m_pData + GetCurrentOffset()), cbCopy); if (pcbRead) *pcbRead = cbCopy; // Save a logical offset. m_cbOffset += cbCopy; } break; case STGIO_NODATA: default: _ASSERTE(0); break; } ErrExit: return (hr); } //***************************************************************************** // Write to disk. This function will cache up to a page of data in a buffer // and peridocially flush it on overflow and explicit request. This makes it // safe to do lots of small writes without too much performance overhead. //***************************************************************************** HRESULT StgIO::Write( // true/false. const void *pbBuff, // Data to write. ULONG cbWrite, // How much data to write. ULONG *pcbWritten) // How much did get written. { ULONG cbWriteIn=cbWrite; // Track amount written. ULONG cbCopy; HRESULT hr = S_OK; _ASSERTE(m_rgBuff != 0); _ASSERTE(cbWrite); while (cbWrite) { // In the case where the buffer is already huge, write the whole thing // and avoid the cache. if (m_cbBuff == 0 && cbWrite >= (ULONG) m_iPageSize) { if (SUCCEEDED(hr = WriteToDisk(pbBuff, cbWrite, pcbWritten))) m_cbOffset += cbWrite; break; } // Otherwise cache as much as we can and flush. else { // Determine how much data goes into the cache buffer. cbCopy = m_iPageSize - m_cbBuff; cbCopy = min(cbCopy, cbWrite); // Copy the data into the cache and adjust counts. memcpy(&m_rgBuff[m_cbBuff], pbBuff, cbCopy); pbBuff = (void *) ((DWORD_PTR)pbBuff + cbCopy); m_cbBuff += cbCopy; m_cbOffset += cbCopy; cbWrite -= cbCopy; // If there is enough data, then flush it to disk and reset count. if (m_cbBuff >= (ULONG) m_iPageSize) { if (FAILED(hr = FlushCache())) break; } } } // Return value for caller. if (SUCCEEDED(hr) && pcbWritten) *pcbWritten = cbWriteIn; return (hr); } //***************************************************************************** // Moves the file pointer to the new location. This handles the different // types of storage systems. //***************************************************************************** HRESULT StgIO::Seek( // New offset. int lVal, // How much to move. ULONG fMoveType) // Direction, use Win32 FILE_xxxx. { ULONG cbRtn = 0; HRESULT hr = NOERROR; _ASSERTE(fMoveType >= FILE_BEGIN && fMoveType <= FILE_END); // Action taken depends on type of storage. switch (m_iType) { case STGIO_HFILE: { // Use the file system's move. _ASSERTE(m_hFile != INVALID_HANDLE_VALUE); cbRtn = ::SetFilePointer(m_hFile, lVal, 0, fMoveType); // Save the location redundantly. if (cbRtn != 0xffffffff) { // make sure that m_cbOffset will stay within range if (cbRtn > m_cbData || cbRtn < 0) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = cbRtn; } } break; case STGIO_STREAM: { LARGE_INTEGER iMove; ULARGE_INTEGER iNewLoc; // Need a 64-bit int. iMove.QuadPart = lVal; // The move types are named differently, but have same value. 
if (FAILED(hr = m_pIStream->Seek(iMove, fMoveType, &iNewLoc))) return (hr); // make sure that m_cbOffset will stay within range if (iNewLoc.u.LowPart > m_cbData || iNewLoc.u.LowPart < 0) IfFailGo(STG_E_INVALIDFUNCTION); // Save off only out location. m_cbOffset = iNewLoc.u.LowPart; } break; case STGIO_MEM: case STGIO_SHAREDMEM: case STGIO_HFILEMEM: case STGIO_HMODULE: { // We own the offset, so change our value. switch (fMoveType) { case FILE_BEGIN: // make sure that m_cbOffset will stay within range if ((ULONG) lVal > m_cbData || lVal < 0) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = lVal; break; case FILE_CURRENT: // make sure that m_cbOffset will stay within range if (m_cbOffset + lVal > m_cbData) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = m_cbOffset + lVal; break; case FILE_END: _ASSERTE(lVal < (LONG) m_cbData); // make sure that m_cbOffset will stay within range if (m_cbData + lVal > m_cbData) { IfFailGo(STG_E_INVALIDFUNCTION); } m_cbOffset = m_cbData + lVal; break; } cbRtn = m_cbOffset; } break; // Weird to seek with no data. case STGIO_NODATA: default: _ASSERTE(0); break; } ErrExit: return hr; } //***************************************************************************** // Retrieves the current offset for the storage being used. This value is // tracked based on Read, Write, and Seek operations. //***************************************************************************** ULONG StgIO::GetCurrentOffset() // Current offset. { return (m_cbOffset); } //***************************************************************************** // Map the file contents to a memory mapped file and return a pointer to the // data. For read/write with a backing store, map the file using an internal // paging system. //***************************************************************************** HRESULT StgIO::MapFileToMem( // Return code. void *&ptr, // Return pointer to file data. ULONG *pcbSize, // Return size of data. LPSECURITY_ATTRIBUTES pAttributes) // Security token. { char rcShared[MAXSHMEM]; // ANSI version of shared name. HRESULT hr = S_OK; // Don't penalize for multiple calls. Also, allow calls for mem type so // callers don't need to do so much checking. if (IsBackingStore() || IsMemoryMapped() || (m_iType == STGIO_MEM) || (m_iType == STGIO_SHAREDMEM) || (m_iType == STGIO_HFILEMEM)) { ptr = m_pData; if (pcbSize) *pcbSize = m_cbData; return (S_OK); } //#CopySmallFiles // Check the size of the data we want to map. If it is small enough, then // simply allocate a chunk of memory from a finer grained heap. This saves // virtual memory space, page table entries, and should reduce overall working set. // Also, open for read/write needs a full backing store. if ((m_cbData <= SMALL_ALLOC_MAP_SIZE) && (SMALL_ALLOC_MAP_SIZE > 0)) { DWORD cbRead = m_cbData; _ASSERTE(m_pData == 0); // Just malloc a chunk of data to use. m_pBaseData = m_pData = AllocateMemory(m_cbData); if (!m_pData) { hr = OutOfMemory(); goto ErrExit; } // Read all of the file contents into this piece of memory. IfFailGo( Seek(0, FILE_BEGIN) ); if (FAILED(hr = Read(m_pData, cbRead, &cbRead))) { FreeMemory(m_pData); m_pData = 0; goto ErrExit; } _ASSERTE(cbRead == m_cbData); // If the file isn't being opened for exclusive mode, then free it. // If it is for exclusive, then we need to keep the handle open so the // file is locked, preventing other readers. Also leave it open if // in read/write mode so we can truncate and rewrite. 
if (m_hFile == INVALID_HANDLE_VALUE || ((m_fFlags & DBPROP_TMODEF_EXCLUSIVE) == 0 && (m_fFlags & DBPROP_TMODEF_WRITE) == 0)) { // If there was a handle open, then free it. if (m_hFile != INVALID_HANDLE_VALUE) { VERIFY(CloseHandle(m_hFile)); m_hFile = INVALID_HANDLE_VALUE; } // Free the stream pointer. else if (m_pIStream != 0) { m_pIStream->Release(); m_pIStream = 0; } // Switch the type to memory only access. m_iType = STGIO_MEM; } else m_iType = STGIO_HFILEMEM; // Free the memory when we shut down. m_bFreeMem = true; } // Finally, a real mapping file must be created. else { // Now we will map, so better have it right. _ASSERTE(m_hFile != INVALID_HANDLE_VALUE || m_iType == STGIO_STREAM); _ASSERTE(m_rgPageMap == 0); // For read mode, use a memory mapped file since the size will never // change for the life of the handle. if ((m_fFlags & DBPROP_TMODEF_WRITE) == 0 && m_iType != STGIO_STREAM) { // Create a mapping object for the file. _ASSERTE(m_hMapping == 0); DWORD dwProtectionFlags = PAGE_READONLY; if ((m_hMapping = WszCreateFileMapping(m_hFile, pAttributes, dwProtectionFlags, 0, 0, nullptr)) == 0) { return (MapFileError(GetLastError())); } m_mtMappedType = MTYPE_FLAT; // Check to see if the memory already exists, in which case we have // no guarantees it is the right piece of data. if (GetLastError() == ERROR_ALREADY_EXISTS) { hr = PostError(CLDB_E_SMDUPLICATE, rcShared); goto ErrExit; } // Now map the file into memory so we can read from pointer access. // <REVISIT_TODO>Note: Added a check for IsBadReadPtr per the Services team which // indicates that under some conditions this API can give you back // a totally bogus pointer.</REVISIT_TODO> if ((m_pBaseData = m_pData = MapViewOfFile(m_hMapping, FILE_MAP_READ, 0, 0, 0)) == 0) { hr = MapFileError(GetLastError()); if (SUCCEEDED(hr)) { _ASSERTE_MSG(FALSE, "Error code doesn't indicate error."); hr = PostError(CLDB_E_FILE_CORRUPT); } // In case we got back a bogus pointer. m_pBaseData = m_pData = NULL; goto ErrExit; } } // In write mode, we need the hybrid combination of being able to back up // the data in memory via cache, but then later rewrite the contents and // throw away our cached copy. Memory mapped files are not good for this // case due to poor write characteristics. else { ULONG iMaxSize; // How much memory required for file. // Figure out how many pages we'll require, round up actual data // size to page size. iMaxSize = (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize); // Check integer overflow in previous statement if (iMaxSize < m_cbData) { IfFailGo(PostError(COR_E_OVERFLOW)); } // Allocate a bit vector to track loaded pages. if ((m_rgPageMap = new (nothrow) BYTE[iMaxSize / m_iPageSize]) == 0) return (PostError(OutOfMemory())); memset(m_rgPageMap, 0, sizeof(BYTE) * (iMaxSize / m_iPageSize)); // Allocate space for the file contents. if ((m_pBaseData = m_pData = ::ClrVirtualAlloc(0, iMaxSize, MEM_RESERVE, PAGE_NOACCESS)) == 0) { hr = PostError(OutOfMemory()); goto ErrExit; } } } // Reset any changes made by mapping. IfFailGo( Seek(0, FILE_BEGIN) ); ErrExit: // Check for errors and clean up. if (FAILED(hr)) { if (m_hMapping) CloseHandle(m_hMapping); m_hMapping = 0; m_pBaseData = m_pData = 0; m_cbData = 0; } ptr = m_pData; if (pcbSize) *pcbSize = m_cbData; return (hr); } //***************************************************************************** // Free the mapping object for shared memory but keep the rest of the internal // state intact. 
//***************************************************************************** HRESULT StgIO::ReleaseMappingObject() // Return code. { // Check type first. if (m_iType != STGIO_SHAREDMEM) { _ASSERTE(FALSE); return S_OK; } // Must have an allocated handle. _ASSERTE(m_hMapping != 0); // Freeing the mapping object doesn't do any good if you still have the file. _ASSERTE(m_hFile == INVALID_HANDLE_VALUE); // Unmap the memory we allocated before freeing the handle. But keep the // memory address intact. if (m_pData) VERIFY(UnmapViewOfFile(m_pData)); // Free the handle. if (m_hMapping != 0) { VERIFY(CloseHandle(m_hMapping)); m_hMapping = 0; } return S_OK; } //***************************************************************************** // Resets the logical base address and size to the value given. This is for // cases like finding a section embedded in another format, like the .clb inside // of an image. GetPtrForMem, Read, and Seek will then behave as though only // data from pbStart to cbSize is valid. //***************************************************************************** HRESULT StgIO::SetBaseRange( // Return code. void *pbStart, // Start of file data. ULONG cbSize) // How big is the range. { if (m_iType == STGIO_SHAREDMEM) { // The base range must be inside of the current range. _ASSERTE((m_pBaseData != NULL) && (m_cbData != 0)); _ASSERTE(((LONG_PTR) pbStart >= (LONG_PTR) m_pBaseData)); _ASSERTE(((LONG_PTR) pbStart + cbSize <= (LONG_PTR) m_pBaseData + m_cbData)); } // Save the base range per user request. m_pData = pbStart; m_cbData = cbSize; return S_OK; } //***************************************************************************** // Caller wants a pointer to a chunk of the file. This function will make sure // that the memory for that chunk has been committed and will load from the // file if required. This algorithm attempts to load no more data from disk // than is necessary. It walks the required pages from lowest to highest, // and for each block of unloaded pages, the memory is committed and the data // is read from disk. If all pages are unloaded, all of them are loaded at // once to speed throughput from disk. //***************************************************************************** HRESULT StgIO::GetPtrForMem( // Return code. ULONG cbStart, // Where to start getting memory. ULONG cbSize, // How much data. void *&ptr) // Return pointer to memory here. { int iFirst, iLast; // First and last page required. ULONG iOffset, iSize; // For committing ranges of memory. int i, j; // Loop control. HRESULT hr; // We need either memory (mmf or user supplied) or a backing store to // return a pointer. Call Read if you don't have these. if (!IsBackingStore() && m_pData == 0) return (PostError(BadError(E_UNEXPECTED))); // Validate the caller isn't asking for a data value out of range. if (!(ClrSafeInt<ULONG>::addition(cbStart, cbSize, iOffset) && (iOffset <= m_cbData))) return (PostError(E_INVALIDARG)); // This code will check for pages that need to be paged from disk in // order for us to return a pointer to that memory. if (IsBackingStore()) { // Backing store is bogus when in rewrite mode. if (m_bRewrite) return (PostError(BadError(E_UNEXPECTED))); // Must have the page map to continue. _ASSERTE(m_rgPageMap && m_iPageSize && m_pData); // Figure out the first and last page that are required for commit. iFirst = cbStart / m_iPageSize; iLast = (cbStart + cbSize - 1) / m_iPageSize; // Avoid confusion. ptr = 0; // Do a smart load of every page required. 
Do not reload pages that have // already been brought in from disk. //<REVISIT_TODO>@FUTURE: add an optimization so that when all pages have been faulted, we no // longer do a page-by-page search.</REVISIT_TODO> for (i=iFirst; i<=iLast; ) { // Find the first page that hasn't already been loaded. while (GetBit(m_rgPageMap, i) && i<=iLast) ++i; if (i > iLast) break; // Offset for first thing to load. iOffset = i * m_iPageSize; iSize = 0; // See how many in a row have not been loaded. for (j=i; i<=iLast && !GetBit(m_rgPageMap, i); i++) { // Safe: iSize += m_iPageSize; if (!(ClrSafeInt<ULONG>::addition(iSize, m_iPageSize, iSize))) { return PostError(E_INVALIDARG); } } // First commit the memory for this part of the file. if (::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset), iSize, MEM_COMMIT, PAGE_READWRITE) == 0) return (PostError(OutOfMemory())); // Now load that portion of the file from disk. if (FAILED(hr = Seek(iOffset, FILE_BEGIN)) || FAILED(hr = ReadFromDisk((void *) ((DWORD_PTR) m_pData + iOffset), iSize, 0))) { return (hr); } // Change the memory to read only to avoid any modifications. Any faults // that occur indicate a bug whereby the engine is trying to write to // protected memory. _ASSERTE(::ClrVirtualAlloc((void *) ((DWORD_PTR) m_pData + iOffset), iSize, MEM_COMMIT, PAGE_READONLY) != 0); // Record each new loaded page. for (; j<i; j++) SetBit(m_rgPageMap, j, true); } // Everything was brought into memory, so now return pointer to caller. ptr = (void *) ((DWORD_PTR) m_pData + cbStart); } // Memory version or memory mapped file works the same way. else if (IsMemoryMapped() || (m_iType == STGIO_MEM) || (m_iType == STGIO_SHAREDMEM) || (m_iType == STGIO_HFILEMEM)) { if (!(cbStart <= m_cbData)) return (PostError(E_INVALIDARG)); ptr = (void *) ((DWORD_PTR) m_pData + cbStart); } // What's left?! Add some defense. else { _ASSERTE(0); ptr = 0; return (PostError(BadError(E_UNEXPECTED))); } return (S_OK); } //***************************************************************************** // For cached writes, flush the cache to the data store. //***************************************************************************** HRESULT StgIO::FlushCache() { ULONG cbWritten; HRESULT hr; if (m_cbBuff) { if (FAILED(hr = WriteToDisk(m_rgBuff, m_cbBuff, &cbWritten))) return (hr); m_cbBuff = 0; } return (S_OK); } //***************************************************************************** // Tells the file system to flush any cached data it may have. This is // expensive, but if successful guarantees you won't lose writes short of // a disk failure. //***************************************************************************** HRESULT StgIO::FlushFileBuffers() { _ASSERTE(!IsReadOnly()); if (m_hFile != INVALID_HANDLE_VALUE) { if (::FlushFileBuffers(m_hFile)) return (S_OK); else return (MapFileError(GetLastError())); } return (S_OK); } //***************************************************************************** // Called after a successful rewrite of an existing file. The in memory // backing store is no longer valid because all new data is in memory and // on disk. This is essentially the same state as created, so free up some // working set and remember this state. //***************************************************************************** HRESULT StgIO::ResetBackingStore() // Return code. { // Don't be calling this function for read only data. _ASSERTE(!IsReadOnly()); // Free up any backing store data we no longer need now that everything // is in memory.
FreePageMap(); return (S_OK); } // // Private. // //***************************************************************************** // This version will force the data in cache out to disk for real. The code // can handle the different types of storage we might be sitting on based on // the open type. //***************************************************************************** HRESULT StgIO::WriteToDisk( // Return code. const void *pbBuff, // Buffer to write. ULONG cbWrite, // How much. ULONG *pcbWritten) // Return how much written. { ULONG cbWritten; // Buffer for write funcs. HRESULT hr = S_OK; // Pretty obvious. _ASSERTE(!IsReadOnly()); // Always need a buffer to write this data to. if (!pcbWritten) pcbWritten = &cbWritten; // Action taken depends on type of storage. switch (m_iType) { case STGIO_HFILE: case STGIO_HFILEMEM: { // Use the file system's move. _ASSERTE(m_hFile != INVALID_HANDLE_VALUE); // Do the write to disk. if (!::WriteFile(m_hFile, pbBuff, cbWrite, pcbWritten, 0)) hr = MapFileError(GetLastError()); } break; // Free the stream pointer. case STGIO_STREAM: { // Delegate write to stream code. hr = m_pIStream->Write(pbBuff, cbWrite, pcbWritten); } break; // We cannot write to fixed read/only memory or LoadLibrary module. case STGIO_HMODULE: case STGIO_MEM: case STGIO_SHAREDMEM: _ASSERTE(0); hr = BadError(E_UNEXPECTED); break; // Weird to seek with no data. case STGIO_NODATA: default: _ASSERTE(0); break; } return (hr); } //***************************************************************************** // This version only reads from disk. //***************************************************************************** HRESULT StgIO::ReadFromDisk( // Return code. void *pbBuff, // Write buffer here. ULONG cbBuff, // How much to read. ULONG *pcbRead) // How much read. { ULONG cbRead; _ASSERTE(m_iType == STGIO_HFILE || m_iType == STGIO_STREAM); // Need to have a buffer. if (!pcbRead) pcbRead = &cbRead; // Read only from file to avoid recursive logic. if (m_iType == STGIO_HFILE || m_iType == STGIO_HFILEMEM) { if (::ReadFile(m_hFile, pbBuff, cbBuff, pcbRead, 0)) return (S_OK); return (MapFileError(GetLastError())); } // Read directly from stream. else { return (m_pIStream->Read(pbBuff, cbBuff, pcbRead)); } } //***************************************************************************** // Copy the contents of the file for this storage to the target path. //***************************************************************************** HRESULT StgIO::CopyFileInternal( // Return code. LPCWSTR szTo, // Target save path for file. int bFailIfThere, // true to fail if target exists. int bWriteThrough) // Should copy be written through OS cache. { DWORD iCurrent; // Save original location. DWORD cbRead; // Byte count for buffer. DWORD cbWrite; // Check write of bytes. const DWORD cbBuff = 4096; // Size of buffer for copy (in bytes). BYTE *pBuff = (BYTE*)alloca(cbBuff); // Buffer for copy. HANDLE hFile; // Target file. HRESULT hr = S_OK; // Create target file. if ((hFile = ::WszCreateFile(szTo, GENERIC_WRITE, 0, 0, (bFailIfThere) ? CREATE_NEW : CREATE_ALWAYS, (bWriteThrough) ? FILE_FLAG_WRITE_THROUGH : 0, 0)) == INVALID_HANDLE_VALUE) { return (MapFileError(GetLastError())); } // Save current location and reset it later. iCurrent = ::SetFilePointer(m_hFile, 0, 0, FILE_CURRENT); ::SetFilePointer(m_hFile, 0, 0, FILE_BEGIN); // Copy while there are bytes. 
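// pBuff is a 4 KB stack buffer (alloca above); the loop below ends when ReadFile reports zero bytes read (end of file) or a write comes up short.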
while (::ReadFile(m_hFile, pBuff, cbBuff, &cbRead, 0) && cbRead) { if (!::WriteFile(hFile, pBuff, cbRead, &cbWrite, 0) || cbWrite != cbRead) { hr = STG_E_WRITEFAULT; break; } } // Reset file offset. ::SetFilePointer(m_hFile, iCurrent, 0, FILE_BEGIN); // Close target. if (!bWriteThrough) VERIFY(::FlushFileBuffers(hFile)); ::CloseHandle(hFile); return (hr); } //***************************************************************************** // Free the data used for the backing store from disk in the read/write scenario. //***************************************************************************** void StgIO::FreePageMap() { // If a small file was allocated, then free that memory. if (m_bFreeMem && m_pBaseData) FreeMemory(m_pBaseData); // For mmf, close handles and free resources. else if (m_hMapping && m_pBaseData) { VERIFY(UnmapViewOfFile(m_pBaseData)); VERIFY(CloseHandle(m_hMapping)); } // For our own system, free memory. else if (m_rgPageMap && m_pBaseData) { delete [] m_rgPageMap; m_rgPageMap = 0; VERIFY(::ClrVirtualFree(m_pBaseData, (((m_cbData - 1) & ~(m_iPageSize - 1)) + m_iPageSize), MEM_DECOMMIT)); VERIFY(::ClrVirtualFree(m_pBaseData, 0, MEM_RELEASE)); m_pBaseData = 0; m_cbData = 0; } m_pBaseData = 0; m_hMapping = 0; m_cbData = 0; } //***************************************************************************** // Check the given pointer and ensure it is aligned correctly. Return true // if it is aligned, false if it is not. //***************************************************************************** int StgIO::IsAlignedPtr(ULONG_PTR Value, int iAlignment) { HRESULT hr; void *ptrStart = NULL; if ((m_iType == STGIO_STREAM) || (m_iType == STGIO_SHAREDMEM) || (m_iType == STGIO_MEM)) { return ((Value - (ULONG_PTR) m_pData) % iAlignment == 0); } else { hr = GetPtrForMem(0, 1, ptrStart); _ASSERTE(hr == S_OK && "GetPtrForMem failed"); _ASSERTE(Value > (ULONG_PTR) ptrStart); return (((Value - (ULONG_PTR) ptrStart) % iAlignment) == 0); } } // int StgIO::IsAlignedPtr() //***************************************************************************** // These helper functions are used to allocate fairly large pieces of memory, // more than should be taken from the runtime heap, but less than would require // virtual memory overhead. //***************************************************************************** // #define _TRACE_MEM_ 1 void *AllocateMemory(int iSize) { void * ptr; ptr = new (nothrow) BYTE[iSize]; #if defined(_DEBUG) && defined(_TRACE_MEM_) static int i=0; DbgWriteEx(W("AllocateMemory: (%d) 0x%08x, size %d\n"), ++i, ptr, iSize); #endif return (ptr); } void FreeMemory(void *pbData) { #if defined(_DEBUG) && defined(_TRACE_MEM_) static int i=0; DbgWriteEx(W("FreeMemory: (%d) 0x%08x\n"), ++i, pbData); #endif _ASSERTE(pbData); delete [] (BYTE *) pbData; }
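For illustration, the page-run loading pattern that GetPtrForMem implements above can be written standalone. This is a minimal sketch, not the runtime's code: PageMap, EnsurePagesLoaded, and loadRun are hypothetical names, and it assumes one flag byte per page, matching the allocation of m_rgPageMap.

#include <cstddef>
#include <vector>

struct PageMap
{
    std::vector<unsigned char> loaded; // One flag per page; 0 = not yet read from disk (illustrative stand-in for m_rgPageMap).
    size_t pageSize;                   // Bytes per page.
};

// Walk pages [first, last] and call loadRun(offset, size) once per contiguous
// run of unloaded pages, so each run costs one seek + one read instead of a
// read per page; then mark the run as loaded.
template <typename LoadRunFn>
void EnsurePagesLoaded(PageMap &map, size_t first, size_t last, LoadRunFn loadRun)
{
    for (size_t i = first; i <= last; )
    {
        while (i <= last && map.loaded[i]) // Skip pages already in memory.
            ++i;
        if (i > last)
            break;
        size_t runStart = i;
        while (i <= last && !map.loaded[i]) // Extend over the unloaded run.
            ++i;
        loadRun(runStart * map.pageSize, (i - runStart) * map.pageSize);
        for (size_t j = runStart; j < i; ++j) // Record each newly loaded page.
            map.loaded[j] = 1;
    }
}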
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/amd64/cgenamd64.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // Various helper routines for generating AMD64 assembly code. // // Precompiled Header #include "common.h" #include "stublink.h" #include "cgensys.h" #include "siginfo.hpp" #include "excep.h" #include "ecall.h" #include "dllimport.h" #include "dllimportcallback.h" #include "dbginterface.h" #include "fcall.h" #include "array.h" #include "virtualcallstub.h" #include "jitinterface.h" #ifdef FEATURE_COMINTEROP #include "clrtocomcall.h" #endif // FEATURE_COMINTEROP void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs) { LIMITED_METHOD_CONTRACT; T_CONTEXT * pContext = pRD->pCurrentContext; #define CALLEE_SAVED_REGISTER(regname) pContext->regname = pRegs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers; #define CALLEE_SAVED_REGISTER(regname) pContextPointers->regname = (PULONG64)&pRegs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER } void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) { LIMITED_METHOD_CONTRACT; KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers; pContextPointers->Rax = NULL; #ifdef UNIX_AMD64_ABI pContextPointers->Rsi = NULL; pContextPointers->Rdi = NULL; #endif pContextPointers->Rcx = NULL; pContextPointers->Rdx = NULL; pContextPointers->R8 = NULL; pContextPointers->R9 = NULL; pContextPointers->R10 = NULL; pContextPointers->R11 = NULL; } void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { LIMITED_METHOD_CONTRACT; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. pRD->pCurrentContext->Rip = GetReturnAddress(); pRD->pCurrentContext->Rsp = GetSP(); UpdateRegDisplayFromCalleeSavedRegisters(pRD, GetCalleeSavedRegisters()); ClearRegDisplayArgumentAndScratchRegisters(pRD); SyncRegDisplayToCurrentContext(pRD); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP)); } void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } CONTRACTL_END; if (!InlinedCallFrame::FrameHasActiveCall(this)) { LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this)); return; } pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
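// Recover the managed caller's Rip, Rsp, and Rbp from the values the P/Invoke transition stored in this frame (m_pCallerReturnAddress, m_pCallSiteSP, m_pCalleeSavedFP).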
pRD->pCurrentContext->Rip = *(DWORD64 *)&m_pCallerReturnAddress; pRD->pCurrentContext->Rsp = *(DWORD64 *)&m_pCallSiteSP; pRD->pCurrentContext->Rbp = *(DWORD64 *)&m_pCalleeSavedFP; ClearRegDisplayArgumentAndScratchRegisters(pRD); #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = NULL; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->pCurrentContextPointers->Rbp = (DWORD64 *)&m_pCalleeSavedFP; SyncRegDisplayToCurrentContext(pRD); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK InlinedCallFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP)); } void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(m_MachState._pRetAddr == PTR_TADDR(&m_MachState.m_Rip)); SUPPORTS_DAC; } CONTRACTL_END; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. // // Copy the saved state from the frame to the current context. // LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState.m_Rip, m_MachState.m_Rsp)); #if defined(DACCESS_COMPILE) // For DAC, we may get here when the HMF is still uninitialized. // So we may need to unwind here. if (!m_MachState.isValid()) { // This allocation throws on OOM. MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true); InsureInit(false, pUnwoundState); pRD->pCurrentContext->Rip = pRD->ControlPC = pUnwoundState->m_Rip; pRD->pCurrentContext->Rsp = pRD->SP = pUnwoundState->m_Rsp; #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = pUnwoundState->m_Capture.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = pUnwoundState->m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER ClearRegDisplayArgumentAndScratchRegisters(pRD); return; } #endif // DACCESS_COMPILE pRD->pCurrentContext->Rip = pRD->ControlPC = m_MachState.m_Rip; pRD->pCurrentContext->Rsp = pRD->SP = m_MachState.m_Rsp; #ifdef TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = (m_MachState.m_Ptrs.p##regname != NULL) ? \ *m_MachState.m_Ptrs.p##regname : m_MachState.m_Unwound.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #else // TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = *m_MachState.m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif // TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = m_MachState.m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER // // Clear all knowledge of scratch registers. We're skipping to any // arbitrary point on the stack, and frames aren't required to preserve or // keep track of these anyways. 
// ClearRegDisplayArgumentAndScratchRegisters(pRD); } void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { LIMITED_METHOD_DAC_CONTRACT; memcpy(pRD->pCurrentContext, &m_ctx, sizeof(CONTEXT)); pRD->ControlPC = m_ctx.Rip; pRD->SP = m_ctx.Rsp; pRD->pCurrentContextPointers->Rax = &m_ctx.Rax; pRD->pCurrentContextPointers->Rcx = &m_ctx.Rcx; pRD->pCurrentContextPointers->Rdx = &m_ctx.Rdx; pRD->pCurrentContextPointers->Rbx = &m_ctx.Rbx; pRD->pCurrentContextPointers->Rbp = &m_ctx.Rbp; pRD->pCurrentContextPointers->Rsi = &m_ctx.Rsi; pRD->pCurrentContextPointers->Rdi = &m_ctx.Rdi; pRD->pCurrentContextPointers->R8 = &m_ctx.R8; pRD->pCurrentContextPointers->R9 = &m_ctx.R9; pRD->pCurrentContextPointers->R10 = &m_ctx.R10; pRD->pCurrentContextPointers->R11 = &m_ctx.R11; pRD->pCurrentContextPointers->R12 = &m_ctx.R12; pRD->pCurrentContextPointers->R13 = &m_ctx.R13; pRD->pCurrentContextPointers->R14 = &m_ctx.R14; pRD->pCurrentContextPointers->R15 = &m_ctx.R15; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. } #ifdef FEATURE_HIJACK TADDR ResumableFrame::GetReturnAddressPtr() { LIMITED_METHOD_DAC_CONTRACT; return dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Rip); } void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(CONTEXT)); pRD->ControlPC = m_Regs->Rip; pRD->SP = m_Regs->Rsp; pRD->pCurrentContextPointers->Rax = &m_Regs->Rax; pRD->pCurrentContextPointers->Rcx = &m_Regs->Rcx; pRD->pCurrentContextPointers->Rdx = &m_Regs->Rdx; pRD->pCurrentContextPointers->Rbx = &m_Regs->Rbx; pRD->pCurrentContextPointers->Rbp = &m_Regs->Rbp; pRD->pCurrentContextPointers->Rsi = &m_Regs->Rsi; pRD->pCurrentContextPointers->Rdi = &m_Regs->Rdi; pRD->pCurrentContextPointers->R8 = &m_Regs->R8; pRD->pCurrentContextPointers->R9 = &m_Regs->R9; pRD->pCurrentContextPointers->R10 = &m_Regs->R10; pRD->pCurrentContextPointers->R11 = &m_Regs->R11; pRD->pCurrentContextPointers->R12 = &m_Regs->R12; pRD->pCurrentContextPointers->R13 = &m_Regs->R13; pRD->pCurrentContextPointers->R14 = &m_Regs->R14; pRD->pCurrentContextPointers->R15 = &m_Regs->R15; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. RETURN; } // The HijackFrame has to know the registers that are pushed by OnHijackTripThread void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
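// Report the hijacked return address as this frame's Rip; the rest of the register state was pushed into m_Args by OnHijackTripThread.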
pRD->pCurrentContext->Rip = m_ReturnAddress; #ifdef TARGET_WINDOWS pRD->pCurrentContext->Rsp = m_Args->Rsp; #else pRD->pCurrentContext->Rsp = PTR_TO_MEMBER_TADDR(HijackArgs, m_Args, Rip) + sizeof(void *); #endif UpdateRegDisplayFromCalleeSavedRegisters(pRD, &(m_Args->Regs)); #ifdef UNIX_AMD64_ABI pRD->pCurrentContextPointers->Rsi = NULL; pRD->pCurrentContextPointers->Rdi = NULL; #endif pRD->pCurrentContextPointers->Rcx = NULL; #ifdef UNIX_AMD64_ABI pRD->pCurrentContextPointers->Rdx = (PULONG64)&m_Args->Rdx; #else // UNIX_AMD64_ABI pRD->pCurrentContextPointers->Rdx = NULL; #endif // UNIX_AMD64_ABI pRD->pCurrentContextPointers->R8 = NULL; pRD->pCurrentContextPointers->R9 = NULL; pRD->pCurrentContextPointers->R10 = NULL; pRD->pCurrentContextPointers->R11 = NULL; pRD->pCurrentContextPointers->Rax = (PULONG64)&m_Args->Rax; SyncRegDisplayToCurrentContext(pRD); /* // This only describes the top-most frame pRD->pContext = NULL; pRD->PCTAddr = dac_cast<TADDR>(m_Args) + offsetof(HijackArgs, Rip); //pRD->pPC = PTR_SLOT(pRD->PCTAddr); pRD->SP = (ULONG64)(pRD->PCTAddr + sizeof(TADDR)); */ } #endif // FEATURE_HIJACK BOOL isJumpRel32(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; PTR_BYTE pbCode = PTR_BYTE(pCode); return 0xE9 == pbCode[0]; } // // Given the same pBuffer that was used by emitJump this // method decodes the instructions and returns the jump target // PCODE decodeJump32(PCODE pBuffer) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // jmp rel32 _ASSERTE(isJumpRel32(pBuffer)); return rel32Decode(pBuffer+1); } BOOL isJumpRel64(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; PTR_BYTE pbCode = PTR_BYTE(pCode); return 0x48 == pbCode[0] && 0xB8 == pbCode[1] && 0xFF == pbCode[10] && 0xE0 == pbCode[11]; } PCODE decodeJump64(PCODE pBuffer) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // mov rax, xxx // jmp rax _ASSERTE(isJumpRel64(pBuffer)); return *PTR_UINT64(pBuffer+2); } #ifdef DACCESS_COMPILE BOOL GetAnyThunkTarget (CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc) { TADDR pThunk = GetIP(pctx); *pTargetMethodDesc = NULL; // // Check for something generated by emitJump. // if (isJumpRel64(pThunk)) { *pTarget = decodeJump64(pThunk); return TRUE; } return FALSE; } #endif // DACCESS_COMPILE #ifndef DACCESS_COMPILE
void EncodeLoadAndJumpThunk (LPBYTE pBuffer, LPVOID pv, LPVOID pTarget) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pBuffer)); } CONTRACTL_END; // mov r10, pv 49 ba xx xx xx xx xx xx xx xx pBuffer[0] = 0x49; pBuffer[1] = 0xBA; *((UINT64 UNALIGNED *)&pBuffer[2]) = (UINT64)pv; // mov rax, pTarget 48 b8 xx xx xx xx xx xx xx xx pBuffer[10] = 0x48; pBuffer[11] = 0xB8; *((UINT64 UNALIGNED *)&pBuffer[12]) = (UINT64)pTarget; // jmp rax ff e0 pBuffer[20] = 0xFF; pBuffer[21] = 0xE0; _ASSERTE(DbgIsExecutable(pBuffer, 22)); } void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target) { CONTRACT_VOID { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACT_END; BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE; BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE; // We need the target to be in a 64-bit aligned memory location and the call instruction // to immediately precede the ComCallMethodDesc. We'll generate an indirect call to avoid // consuming 3 qwords for this (mov rax, | target | nops & call rax). // dq 123456789abcdef0h // nop 90 // nop 90 // call [$ - 10] ff 15 f0 ff ff ff *((UINT64 *)&pBufferRW[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target; pBufferRW[-2] = 0x90; pBufferRW[-1] = 0x90; pBufferRW[0] = 0xFF; pBufferRW[1] = 0x15; *((UINT32 UNALIGNED *)&pBufferRW[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE); _ASSERTE(DbgIsExecutable(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE)); RETURN; } void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pBufferRX)); } CONTRACTL_END; // mov rax, 123456789abcdef0h 48 b8 xx xx xx xx xx xx xx xx // jmp rax ff e0 pBufferRW[0] = 0x48; pBufferRW[1] = 0xB8; *((UINT64 UNALIGNED *)&pBufferRW[2]) = (UINT64)target; pBufferRW[10] = 0xFF; pBufferRW[11] = 0xE0; _ASSERTE(DbgIsExecutable(pBufferRX, 12)); } void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // padding // CC CC CC CC // mov r10, pUMEntryThunk // 49 ba xx xx xx xx xx xx xx xx // METHODDESC_REGISTER // mov rax, pJmpDest // 48 b8 xx xx xx xx xx xx xx xx // need to ensure this imm64 is qword aligned // TAILJMP_RAX // 48 FF E0 #ifdef _DEBUG m_padding[0] = X86_INSTR_INT3; m_padding[1] = X86_INSTR_INT3; m_padding[2] = X86_INSTR_INT3; m_padding[3] = X86_INSTR_INT3; #endif // _DEBUG m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT; m_movR10[1] = 0xBA; m_uet = pvSecretParam; m_movRAX[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; m_movRAX[1] = 0xB8; m_execstub = pTargetCode; m_jmpRAX[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; m_jmpRAX[1] = 0xFF; m_jmpRAX[2] = 0xE0; _ASSERTE(DbgIsExecutable(&pEntryThunkCodeRX->m_movR10[0], &pEntryThunkCodeRX->m_jmpRAX[3]-&pEntryThunkCodeRX->m_movR10[0])); } void UMEntryThunkCode::Poison() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode)); UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW(); pThisRW->m_execstub = (BYTE *)UMEntryThunk::ReportViolation; pThisRW->m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; #ifdef _WIN32 // mov rcx, pUMEntryThunk // 48 b9 xx xx xx xx xx xx xx xx pThisRW->m_movR10[1] = 0xB9; #else // mov rdi, pUMEntryThunk // 48 bf xx xx xx xx xx xx xx xx pThisRW->m_movR10[1] = 0xBF; #endif 
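// Make sure the instruction cache observes the poisoned bytes before the thunk can run again.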
ClrFlushInstructionCache(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]); } UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback) { LIMITED_METHOD_CONTRACT; UMEntryThunkCode *pThunkCode = (UMEntryThunkCode*)((BYTE*)pCallback - UMEntryThunkCode::GetEntryPointOffset()); return (UMEntryThunk*)pThunkCode->m_uet; } INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod, LoaderAllocator *pLoaderAllocator /* = NULL */, bool throwOnOutOfMemoryWithinRange /*= true*/) { CONTRACTL { THROWS; // Creating a JumpStub could throw OutOfMemory GC_NOTRIGGER; PRECONDITION(pMethod != NULL || pLoaderAllocator != NULL); // If a loader allocator isn't explicitly provided, we must be able to get one via the MethodDesc. PRECONDITION(pLoaderAllocator != NULL || pMethod->GetLoaderAllocator() != NULL); // If a domain is provided, the MethodDesc mustn't yet be set up to have one, or it must match the MethodDesc's domain, // unless we're in a compilation domain (NGen loads assemblies as domain-bound but compiles them as domain neutral). PRECONDITION(!pLoaderAllocator || !pMethod || pMethod->GetMethodDescChunk()->GetMethodTable() == NULL || pLoaderAllocator == pMethod->GetMethodDescChunk()->GetFirstMethodDesc()->GetLoaderAllocator()); } CONTRACTL_END; TADDR baseAddr = (TADDR)pRel32 + 4; INT_PTR offset = target - baseAddr; if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs())) { TADDR loAddr = baseAddr + INT32_MIN; if (loAddr > baseAddr) loAddr = UINT64_MIN; // overflow TADDR hiAddr = baseAddr + INT32_MAX; if (hiAddr < baseAddr) hiAddr = UINT64_MAX; // overflow // Always try to allocate with throwOnOutOfMemoryWithinRange:false first to conserve reserveForJumpStubs until // it is really needed. LoaderCodeHeap::CreateCodeHeap and EEJitManager::CanUseCodeHeap won't use the reserved // space when throwOnOutOfMemoryWithinRange is false. // // The reserved space should only be used by jump stubs for precodes and other similar code fragments. It should // not be used by JITed code. And since the accounting of the reserved space is not precise, we are conservative // and try to save the reserved space until it is really needed to avoid throwing an out-of-memory-within-range exception.
PCODE jumpStubAddr = ExecutionManager::jumpStub(pMethod, target, (BYTE *)loAddr, (BYTE *)hiAddr, pLoaderAllocator, /* throwOnOutOfMemoryWithinRange */ false); if (jumpStubAddr == NULL) { if (!throwOnOutOfMemoryWithinRange) return 0; jumpStubAddr = ExecutionManager::jumpStub(pMethod, target, (BYTE *)loAddr, (BYTE *)hiAddr, pLoaderAllocator, /* throwOnOutOfMemoryWithinRange */ true); } offset = jumpStubAddr - baseAddr; if (!FitsInI4(offset)) { _ASSERTE(!"jump stub was not in expected range"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } } _ASSERTE(FitsInI4(offset)); return static_cast<INT32>(offset); } INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddrRX, PCODE jumpStubAddrRW, bool emitJump) { CONTRACTL { THROWS; // emitBackToBackJump may throw (see emitJump) GC_NOTRIGGER; } CONTRACTL_END; TADDR baseAddr = (TADDR)pRel32 + 4; _ASSERTE(FitsInI4(jumpStubAddrRX - baseAddr)); INT_PTR offset = target - baseAddr; if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs())) { offset = jumpStubAddrRX - baseAddr; if (!FitsInI4(offset)) { _ASSERTE(!"jump stub was not in expected range"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } if (emitJump) { emitBackToBackJump((LPBYTE)jumpStubAddrRX, (LPBYTE)jumpStubAddrRW, (LPVOID)target); } else { _ASSERTE(decodeBackToBackJump(jumpStubAddrRX) == target); } } _ASSERTE(FitsInI4(offset)); return static_cast<INT32>(offset); } BOOL DoesSlotCallPrestub(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(pCode != GetPreStubEntryPoint()); } CONTRACTL_END; // AMD64 has the following possible sequences for prestub logic: // 1. slot -> temporary entrypoint -> prestub // 2. slot -> precode -> prestub // 3. slot -> precode -> jumprel64 (jump stub) -> prestub // 4. 
slot -> precode -> jumprel64 (NGEN case) -> prestub #ifdef HAS_COMPACT_ENTRYPOINTS if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL) { return TRUE; } #endif if (!IS_ALIGNED(pCode, PRECODE_ALIGNMENT)) { return FALSE; } #ifdef HAS_FIXUP_PRECODE if (*PTR_BYTE(pCode) == X86_INSTR_CALL_REL32) { // Note that call could have been patched to jmp in the meantime pCode = rel32Decode(pCode+1); // JumpStub if (isJumpRel64(pCode)) { pCode = decodeJump64(pCode); } return pCode == (TADDR)PrecodeFixupThunk; } #endif if (*PTR_USHORT(pCode) != X86_INSTR_MOV_R10_IMM64 || // mov r10,XXXX *PTR_BYTE(pCode+10) != X86_INSTR_NOP || // nop *PTR_BYTE(pCode+11) != X86_INSTR_JMP_REL32) // jmp rel32 { return FALSE; } pCode = rel32Decode(pCode+12); // JumpStub if (isJumpRel64(pCode)) { pCode = decodeJump64(pCode); } return pCode == GetPreStubEntryPoint(); } // // Some AMD64 assembly functions have one or more DWORDS at the end of the function // that specify the offsets where significant instructions are // we use this function to get at these offsets // DWORD GetOffsetAtEndOfFunction(ULONGLONG uImageBase, PT_RUNTIME_FUNCTION pFunctionEntry, int offsetNum /* = 1*/) { CONTRACTL { MODE_ANY; NOTHROW; GC_NOTRIGGER; PRECONDITION((offsetNum > 0) && (offsetNum < 20)); /* we only allow reasonable offsetNums 1..19 */ } CONTRACTL_END; DWORD functionSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress; BYTE* pEndOfFunction = (BYTE*) (uImageBase + pFunctionEntry->EndAddress); DWORD* pOffset = (DWORD*) (pEndOfFunction) - offsetNum; DWORD offsetInFunc = *pOffset; _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/cGenAMD64.cpp", (offsetInFunc >= 0) && (offsetInFunc < functionSize)); return offsetInFunc; } #ifdef FEATURE_READYTORUN // // Allocation of dynamic helpers // #define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR) #define BEGIN_DYNAMIC_HELPER_EMIT(size) \ SIZE_T cb = size; \ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \ BYTE * pStart = startWriterHolder.GetRW(); \ size_t rxOffset = pStartRX - pStart; \ BYTE * p = pStart; #define END_DYNAMIC_HELPER_EMIT() \ _ASSERTE(pStart + cb == p); \ while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \ ClrFlushInstructionCache(pStartRX, cbAligned); \ return (PCODE)pStartRX PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { STANDARD_VM_CONTRACT; BEGIN_DYNAMIC_HELPER_EMIT(15); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX #else *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target) { CONTRACTL { GC_NOTRIGGER; PRECONDITION(p != NULL && target != NULL); } CONTRACTL_END; // Move an argument into the second argument register and jump to a target function.
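// The second integer argument register is rsi in the SysV AMD64 ABI (UNIX_AMD64_ABI) and rdx in the Windows x64 ABI.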
#ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBE48; // mov rsi, XXXXXX #else *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; } PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(15); EmitHelperWithArg(p, rxOffset, pAllocator, arg, target); END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(25); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX #else *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBE48; // mov rsi, XXXXXX #else *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #endif p += 2; *(TADDR *)p = arg2; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(18); #ifdef UNIX_AMD64_ABI *p++ = 0x48; // mov rsi, rdi *(UINT16 *)p = 0xF78B; #else *p++ = 0x48; // mov rdx, rcx *(UINT16 *)p = 0xD18B; #endif p += 2; #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX #else *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator) { BEGIN_DYNAMIC_HELPER_EMIT(1); *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg) { BEGIN_DYNAMIC_HELPER_EMIT(11); *(UINT16 *)p = 0xB848; // mov rax, XXXXXX p += 2; *(TADDR *)p = arg; p += 8; *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset) { BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 
15 : 11); *(UINT16 *)p = 0xA148; // mov rax, [XXXXXX] p += 2; *(TADDR *)p = arg; p += 8; if (offset != 0) { // add rax, <offset> *p++ = 0x48; *p++ = 0x83; *p++ = 0xC0; *p++ = offset; } *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(15); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #else *(UINT16 *)p = 0xB849; // mov r8, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(25); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #else *(UINT16 *)p = 0xB849; // mov r8, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #else *(UINT16 *)p = 0xB949; // mov r9, XXXXXX #endif p += 2; *(TADDR *)p = arg2; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule) { STANDARD_VM_CONTRACT; PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ? GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) : GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule)); GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT); ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs)); argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot; argsWriterHolder.GetRW()->signature = pLookup->signature; argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule; WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*); // It's available only via the run-time helper function if (pLookup->indirections == CORINFO_USEHELPER) { BEGIN_DYNAMIC_HELPER_EMIT(15); // rcx/rdi contains the generic context parameter // mov rdx/rsi,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); END_DYNAMIC_HELPER_EMIT(); } else { int indirectionsSize = 0; for (WORD i = 0; i < pLookup->indirections; i++) indirectionsSize += (pLookup->offsets[i] >= 0x80 ? 7 : 4); int codeSize = indirectionsSize + (pLookup->testForNull ? 21 : 1) + (pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK ? 
13 : 0); BEGIN_DYNAMIC_HELPER_EMIT(codeSize); BYTE* pJLECall = NULL; for (WORD i = 0; i < pLookup->indirections; i++) { if (i == pLookup->indirections - 1 && pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { _ASSERTE(pLookup->testForNull && i > 0); // cmp qword ptr[rax + sizeOffset],slotOffset *(UINT32*)p = 0x00b88148; p += 3; *(UINT32*)p = (UINT32)pLookup->sizeOffset; p += 4; *(UINT32*)p = (UINT32)slotOffset; p += 4; // jle 'HELPER CALL' *p++ = 0x7e; pJLECall = p++; // Offset filled later } if (i == 0) { // Move from rcx|rdi if it's the first indirection, otherwise from rax #ifdef UNIX_AMD64_ABI // mov rax,qword ptr [rdi+offset] if (pLookup->offsets[i] >= 0x80) { *(UINT32*)p = 0x00878b48; p += 3; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT32*)p = 0x00478b48; p += 3; *p++ = (BYTE)pLookup->offsets[i]; } #else // mov rax,qword ptr [rcx+offset] if (pLookup->offsets[i] >= 0x80) { *(UINT32*)p = 0x00818b48; p += 3; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT32*)p = 0x00418b48; p += 3; *p++ = (BYTE)pLookup->offsets[i]; } #endif } else { // mov rax,qword ptr [rax+offset] if (pLookup->offsets[i] >= 0x80) { *(UINT32*)p = 0x00808b48; p += 3; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT32*)p = 0x00408b48; p += 3; *p++ = (BYTE)pLookup->offsets[i]; } } } // No null test required if (!pLookup->testForNull) { _ASSERTE(pLookup->sizeOffset == CORINFO_NO_SIZE_CHECK); // No fixups needed for R2R *p++ = 0xC3; // ret } else { // rcx/rdi contains the value of the dictionary slot entry _ASSERTE(pLookup->indirections != 0); *(UINT32*)p = 0x00c08548; p += 3; // test rax,rax // je 'HELPER_CALL' (a jump of 1 byte) *(UINT16*)p = 0x0174; p += 2; *p++ = 0xC3; // ret // 'HELPER_CALL' { if (pJLECall != NULL) *pJLECall = (BYTE)(p - pJLECall - 1); // rcx|rdi already contains the generic context parameter // mov rdx|rsi,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); } } END_DYNAMIC_HELPER_EMIT(); } } #endif // FEATURE_READYTORUN #endif // DACCESS_COMPILE
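As a standalone illustration of the displacement math that rel32UsingJumpStub and rel32UsingPreallocatedJumpStub rely on above: a jmp rel32 can only reach targets within a signed 32-bit offset of the byte after the instruction, so the runtime falls back to a jump stub allocated inside that window. This is a hedged sketch with hypothetical names (FitsInRel32, Rel32Window); the runtime itself uses FitsInI4 and the inline loAddr/hiAddr clamping shown in the code.

#include <cstdint>

// True when target is reachable from baseAddr (the byte after the rel32)
// with a signed 32-bit displacement, mirroring FitsInI4(target - baseAddr).
static bool FitsInRel32(uint64_t baseAddr, uint64_t target)
{
    int64_t offset = (int64_t)(target - baseAddr); // Wraps mod 2^64, like the TADDR arithmetic above.
    return offset >= INT32_MIN && offset <= INT32_MAX;
}

// Compute the [lo, hi] address window a jump stub must be allocated in,
// clamping at the ends of the 64-bit address space on overflow, the same
// way the loAddr/hiAddr computation above clamps.
static void Rel32Window(uint64_t baseAddr, uint64_t &lo, uint64_t &hi)
{
    lo = baseAddr + (uint64_t)(int64_t)INT32_MIN; // baseAddr - 2 GB, mod 2^64.
    if (lo > baseAddr)                            // Wrapped below zero.
        lo = 0;
    hi = baseAddr + (uint64_t)INT32_MAX;          // baseAddr + 2 GB - 1.
    if (hi < baseAddr)                            // Wrapped past the top.
        hi = UINT64_MAX;
}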
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // Various helper routines for generating AMD64 assembly code. // // Precompiled Header #include "common.h" #include "stublink.h" #include "cgensys.h" #include "siginfo.hpp" #include "excep.h" #include "ecall.h" #include "dllimport.h" #include "dllimportcallback.h" #include "dbginterface.h" #include "fcall.h" #include "array.h" #include "virtualcallstub.h" #include "jitinterface.h" #ifdef FEATURE_COMINTEROP #include "clrtocomcall.h" #endif // FEATURE_COMINTEROP void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs) { LIMITED_METHOD_CONTRACT; T_CONTEXT * pContext = pRD->pCurrentContext; #define CALLEE_SAVED_REGISTER(regname) pContext->regname = pRegs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers; #define CALLEE_SAVED_REGISTER(regname) pContextPointers->regname = (PULONG64)&pRegs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER } void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) { LIMITED_METHOD_CONTRACT; KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers; pContextPointers->Rax = NULL; #ifdef UNIX_AMD64_ABI pContextPointers->Rsi = NULL; pContextPointers->Rdi = NULL; #endif pContextPointers->Rcx = NULL; pContextPointers->Rdx = NULL; pContextPointers->R8 = NULL; pContextPointers->R9 = NULL; pContextPointers->R10 = NULL; pContextPointers->R11 = NULL; } void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { LIMITED_METHOD_CONTRACT; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. pRD->pCurrentContext->Rip = GetReturnAddress(); pRD->pCurrentContext->Rsp = GetSP(); UpdateRegDisplayFromCalleeSavedRegisters(pRD, GetCalleeSavedRegisters()); ClearRegDisplayArgumentAndScratchRegisters(pRD); SyncRegDisplayToCurrentContext(pRD); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP)); } void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } CONTRACTL_END; if (!InlinedCallFrame::FrameHasActiveCall(this)) { LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this)); return; } pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
pRD->pCurrentContext->Rip = *(DWORD64 *)&m_pCallerReturnAddress; pRD->pCurrentContext->Rsp = *(DWORD64 *)&m_pCallSiteSP; pRD->pCurrentContext->Rbp = *(DWORD64 *)&m_pCalleeSavedFP; ClearRegDisplayArgumentAndScratchRegisters(pRD); #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = NULL; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->pCurrentContextPointers->Rbp = (DWORD64 *)&m_pCalleeSavedFP; SyncRegDisplayToCurrentContext(pRD); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK InlinedCallFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP)); } void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(m_MachState._pRetAddr == PTR_TADDR(&m_MachState.m_Rip)); SUPPORTS_DAC; } CONTRACTL_END; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. // // Copy the saved state from the frame to the current context. // LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState.m_Rip, m_MachState.m_Rsp)); #if defined(DACCESS_COMPILE) // For DAC, we may get here when the HMF is still uninitialized. // So we may need to unwind here. if (!m_MachState.isValid()) { // This allocation throws on OOM. MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true); InsureInit(false, pUnwoundState); pRD->pCurrentContext->Rip = pRD->ControlPC = pUnwoundState->m_Rip; pRD->pCurrentContext->Rsp = pRD->SP = pUnwoundState->m_Rsp; #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = pUnwoundState->m_Capture.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = pUnwoundState->m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER ClearRegDisplayArgumentAndScratchRegisters(pRD); return; } #endif // DACCESS_COMPILE pRD->pCurrentContext->Rip = pRD->ControlPC = m_MachState.m_Rip; pRD->pCurrentContext->Rsp = pRD->SP = m_MachState.m_Rsp; #ifdef TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = (m_MachState.m_Ptrs.p##regname != NULL) ? \ *m_MachState.m_Ptrs.p##regname : m_MachState.m_Unwound.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #else // TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = *m_MachState.m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif // TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = m_MachState.m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER // // Clear all knowledge of scratch registers. We're skipping to any // arbitrary point on the stack, and frames aren't required to preserve or // keep track of these anyways. 
// ClearRegDisplayArgumentAndScratchRegisters(pRD); } void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { LIMITED_METHOD_DAC_CONTRACT; memcpy(pRD->pCurrentContext, &m_ctx, sizeof(CONTEXT)); pRD->ControlPC = m_ctx.Rip; pRD->SP = m_ctx.Rsp; pRD->pCurrentContextPointers->Rax = &m_ctx.Rax; pRD->pCurrentContextPointers->Rcx = &m_ctx.Rcx; pRD->pCurrentContextPointers->Rdx = &m_ctx.Rdx; pRD->pCurrentContextPointers->Rbx = &m_ctx.Rbx; pRD->pCurrentContextPointers->Rbp = &m_ctx.Rbp; pRD->pCurrentContextPointers->Rsi = &m_ctx.Rsi; pRD->pCurrentContextPointers->Rdi = &m_ctx.Rdi; pRD->pCurrentContextPointers->R8 = &m_ctx.R8; pRD->pCurrentContextPointers->R9 = &m_ctx.R9; pRD->pCurrentContextPointers->R10 = &m_ctx.R10; pRD->pCurrentContextPointers->R11 = &m_ctx.R11; pRD->pCurrentContextPointers->R12 = &m_ctx.R12; pRD->pCurrentContextPointers->R13 = &m_ctx.R13; pRD->pCurrentContextPointers->R14 = &m_ctx.R14; pRD->pCurrentContextPointers->R15 = &m_ctx.R15; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. } #ifdef FEATURE_HIJACK TADDR ResumableFrame::GetReturnAddressPtr() { LIMITED_METHOD_DAC_CONTRACT; return dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Rip); } void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(CONTEXT)); pRD->ControlPC = m_Regs->Rip; pRD->SP = m_Regs->Rsp; pRD->pCurrentContextPointers->Rax = &m_Regs->Rax; pRD->pCurrentContextPointers->Rcx = &m_Regs->Rcx; pRD->pCurrentContextPointers->Rdx = &m_Regs->Rdx; pRD->pCurrentContextPointers->Rbx = &m_Regs->Rbx; pRD->pCurrentContextPointers->Rbp = &m_Regs->Rbp; pRD->pCurrentContextPointers->Rsi = &m_Regs->Rsi; pRD->pCurrentContextPointers->Rdi = &m_Regs->Rdi; pRD->pCurrentContextPointers->R8 = &m_Regs->R8; pRD->pCurrentContextPointers->R9 = &m_Regs->R9; pRD->pCurrentContextPointers->R10 = &m_Regs->R10; pRD->pCurrentContextPointers->R11 = &m_Regs->R11; pRD->pCurrentContextPointers->R12 = &m_Regs->R12; pRD->pCurrentContextPointers->R13 = &m_Regs->R13; pRD->pCurrentContextPointers->R14 = &m_Regs->R14; pRD->pCurrentContextPointers->R15 = &m_Regs->R15; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. RETURN; } // The HijackFrame has to know the registers that are pushed by OnHijackTripThread void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
pRD->pCurrentContext->Rip = m_ReturnAddress; #ifdef TARGET_WINDOWS pRD->pCurrentContext->Rsp = m_Args->Rsp; #else pRD->pCurrentContext->Rsp = PTR_TO_MEMBER_TADDR(HijackArgs, m_Args, Rip) + sizeof(void *); #endif UpdateRegDisplayFromCalleeSavedRegisters(pRD, &(m_Args->Regs)); #ifdef UNIX_AMD64_ABI pRD->pCurrentContextPointers->Rsi = NULL; pRD->pCurrentContextPointers->Rdi = NULL; #endif pRD->pCurrentContextPointers->Rcx = NULL; #ifdef UNIX_AMD64_ABI pRD->pCurrentContextPointers->Rdx = (PULONG64)&m_Args->Rdx; #else // UNIX_AMD64_ABI pRD->pCurrentContextPointers->Rdx = NULL; #endif // UNIX_AMD64_ABI pRD->pCurrentContextPointers->R8 = NULL; pRD->pCurrentContextPointers->R9 = NULL; pRD->pCurrentContextPointers->R10 = NULL; pRD->pCurrentContextPointers->R11 = NULL; pRD->pCurrentContextPointers->Rax = (PULONG64)&m_Args->Rax; SyncRegDisplayToCurrentContext(pRD); /* // This only describes the top-most frame pRD->pContext = NULL; pRD->PCTAddr = dac_cast<TADDR>(m_Args) + offsetof(HijackArgs, Rip); //pRD->pPC = PTR_SLOT(pRD->PCTAddr); pRD->SP = (ULONG64)(pRD->PCTAddr + sizeof(TADDR)); */ } #endif // FEATURE_HIJACK BOOL isJumpRel32(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; PTR_BYTE pbCode = PTR_BYTE(pCode); return 0xE9 == pbCode[0]; } // // Given the same pBuffer that was used by emitJump this // method decodes the instructions and returns the jump target // PCODE decodeJump32(PCODE pBuffer) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // jmp rel32 _ASSERTE(isJumpRel32(pBuffer)); return rel32Decode(pBuffer+1); } BOOL isJumpRel64(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; PTR_BYTE pbCode = PTR_BYTE(pCode); return 0x48 == pbCode[0] && 0xB8 == pbCode[1] && 0xFF == pbCode[10] && 0xE0 == pbCode[11]; } PCODE decodeJump64(PCODE pBuffer) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // mov rax, xxx // jmp rax _ASSERTE(isJumpRel64(pBuffer)); return *PTR_UINT64(pBuffer+2); } #ifdef DACCESS_COMPILE BOOL GetAnyThunkTarget (CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc) { TADDR pThunk = GetIP(pctx); *pTargetMethodDesc = NULL; // // Check for something generated by emitJump. // if (isJumpRel64(pThunk)) { *pTarget = decodeJump64(pThunk); return TRUE; } return FALSE; } #endif // DACCESS_COMPILE #ifndef DACCESS_COMPILE // Note: This is only used on server GC on Windows. // // This function returns the number of logical processors on a given physical chip. If it cannot // determine the number of logical cpus, or the machine is not populated uniformly with the same // type of processors, this function returns 1. 
void EncodeLoadAndJumpThunk (LPBYTE pBuffer, LPVOID pv, LPVOID pTarget) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pBuffer)); } CONTRACTL_END; // mov r10, pv 49 ba xx xx xx xx xx xx xx xx pBuffer[0] = 0x49; pBuffer[1] = 0xBA; *((UINT64 UNALIGNED *)&pBuffer[2]) = (UINT64)pv; // mov rax, pTarget 48 b8 xx xx xx xx xx xx xx xx pBuffer[10] = 0x48; pBuffer[11] = 0xB8; *((UINT64 UNALIGNED *)&pBuffer[12]) = (UINT64)pTarget; // jmp rax ff e0 pBuffer[20] = 0xFF; pBuffer[21] = 0xE0; _ASSERTE(DbgIsExecutable(pBuffer, 22)); } void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target) { CONTRACT_VOID { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACT_END; BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE; BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE; // We need the target to be in a 64-bit aligned memory location and the call instruction // to immediately precede the ComCallMethodDesc. We'll generate an indirect call to avoid // consuming 3 qwords for this (mov rax, | target | nops & call rax). // dq 123456789abcdef0h // nop 90 // nop 90 // call [$ - 10] ff 15 f0 ff ff ff *((UINT64 *)&pBufferRW[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target; pBufferRW[-2] = 0x90; pBufferRW[-1] = 0x90; pBufferRW[0] = 0xFF; pBufferRW[1] = 0x15; *((UINT32 UNALIGNED *)&pBufferRW[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE); _ASSERTE(DbgIsExecutable(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE)); RETURN; } void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pBufferRX)); } CONTRACTL_END; // mov rax, 123456789abcdef0h 48 b8 xx xx xx xx xx xx xx xx // jmp rax ff e0 pBufferRW[0] = 0x48; pBufferRW[1] = 0xB8; *((UINT64 UNALIGNED *)&pBufferRW[2]) = (UINT64)target; pBufferRW[10] = 0xFF; pBufferRW[11] = 0xE0; _ASSERTE(DbgIsExecutable(pBufferRX, 12)); } void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // padding // CC CC CC CC // mov r10, pUMEntryThunk // 49 ba xx xx xx xx xx xx xx xx // METHODDESC_REGISTER // mov rax, pJmpDest // 48 b8 xx xx xx xx xx xx xx xx // need to ensure this imm64 is qword aligned // TAILJMP_RAX // 48 FF E0 #ifdef _DEBUG m_padding[0] = X86_INSTR_INT3; m_padding[1] = X86_INSTR_INT3; m_padding[2] = X86_INSTR_INT3; m_padding[3] = X86_INSTR_INT3; #endif // _DEBUG m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT; m_movR10[1] = 0xBA; m_uet = pvSecretParam; m_movRAX[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; m_movRAX[1] = 0xB8; m_execstub = pTargetCode; m_jmpRAX[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; m_jmpRAX[1] = 0xFF; m_jmpRAX[2] = 0xE0; _ASSERTE(DbgIsExecutable(&pEntryThunkCodeRX->m_movR10[0], &pEntryThunkCodeRX->m_jmpRAX[3]-&pEntryThunkCodeRX->m_movR10[0])); } void UMEntryThunkCode::Poison() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode)); UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW(); pThisRW->m_execstub = (BYTE *)UMEntryThunk::ReportViolation; pThisRW->m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; #ifdef _WIN32 // mov rcx, pUMEntryThunk // 48 b9 xx xx xx xx xx xx xx xx pThisRW->m_movR10[1] = 0xB9; #else // mov rdi, pUMEntryThunk // 48 bf xx xx xx xx xx xx xx xx pThisRW->m_movR10[1] = 0xBF; #endif 
ClrFlushInstructionCache(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]); } UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback) { LIMITED_METHOD_CONTRACT; UMEntryThunkCode *pThunkCode = (UMEntryThunkCode*)((BYTE*)pCallback - UMEntryThunkCode::GetEntryPointOffset()); return (UMEntryThunk*)pThunkCode->m_uet; } INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod, LoaderAllocator *pLoaderAllocator /* = NULL */, bool throwOnOutOfMemoryWithinRange /*= true*/) { CONTRACTL { THROWS; // Creating a JumpStub could throw OutOfMemory GC_NOTRIGGER; PRECONDITION(pMethod != NULL || pLoaderAllocator != NULL); // If a loader allocator isn't explicitly provided, we must be able to get one via the MethodDesc. PRECONDITION(pLoaderAllocator != NULL || pMethod->GetLoaderAllocator() != NULL); // If a domain is provided, the MethodDesc mustn't yet be set up to have one, or it must match the MethodDesc's domain, // unless we're in a compilation domain (NGen loads assemblies as domain-bound but compiles them as domain neutral). PRECONDITION(!pLoaderAllocator || !pMethod || pMethod->GetMethodDescChunk()->GetMethodTable() == NULL || pLoaderAllocator == pMethod->GetMethodDescChunk()->GetFirstMethodDesc()->GetLoaderAllocator()); } CONTRACTL_END; TADDR baseAddr = (TADDR)pRel32 + 4; INT_PTR offset = target - baseAddr; if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs())) { TADDR loAddr = baseAddr + INT32_MIN; if (loAddr > baseAddr) loAddr = UINT64_MIN; // overflow TADDR hiAddr = baseAddr + INT32_MAX; if (hiAddr < baseAddr) hiAddr = UINT64_MAX; // overflow // Always try to allocate with throwOnOutOfMemoryWithinRange:false first to conserve reserveForJumpStubs until when // it is really needed. LoaderCodeHeap::CreateCodeHeap and EEJitManager::CanUseCodeHeap won't use the reserved // space when throwOnOutOfMemoryWithinRange is false. // // The reserved space should be only used by jump stubs for precodes and other similar code fragments. It should // not be used by JITed code. And since the accounting of the reserved space is not precise, we are conservative // and try to save the reserved space until it is really needed to avoid throwing out of memory within range exception. 
PCODE jumpStubAddr = ExecutionManager::jumpStub(pMethod, target, (BYTE *)loAddr, (BYTE *)hiAddr, pLoaderAllocator, /* throwOnOutOfMemoryWithinRange */ false); if (jumpStubAddr == NULL) { if (!throwOnOutOfMemoryWithinRange) return 0; jumpStubAddr = ExecutionManager::jumpStub(pMethod, target, (BYTE *)loAddr, (BYTE *)hiAddr, pLoaderAllocator, /* throwOnOutOfMemoryWithinRange */ true); } offset = jumpStubAddr - baseAddr; if (!FitsInI4(offset)) { _ASSERTE(!"jump stub was not in expected range"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } } _ASSERTE(FitsInI4(offset)); return static_cast<INT32>(offset); } INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddrRX, PCODE jumpStubAddrRW, bool emitJump) { CONTRACTL { THROWS; // emitBackToBackJump may throw (see emitJump) GC_NOTRIGGER; } CONTRACTL_END; TADDR baseAddr = (TADDR)pRel32 + 4; _ASSERTE(FitsInI4(jumpStubAddrRX - baseAddr)); INT_PTR offset = target - baseAddr; if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs())) { offset = jumpStubAddrRX - baseAddr; if (!FitsInI4(offset)) { _ASSERTE(!"jump stub was not in expected range"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } if (emitJump) { emitBackToBackJump((LPBYTE)jumpStubAddrRX, (LPBYTE)jumpStubAddrRW, (LPVOID)target); } else { _ASSERTE(decodeBackToBackJump(jumpStubAddrRX) == target); } } _ASSERTE(FitsInI4(offset)); return static_cast<INT32>(offset); } BOOL DoesSlotCallPrestub(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(pCode != GetPreStubEntryPoint()); } CONTRACTL_END; // AMD64 has the following possible sequences for prestub logic: // 1. slot -> temporary entrypoint -> prestub // 2. slot -> precode -> prestub // 3. slot -> precode -> jumprel64 (jump stub) -> prestub // 4. 
slot -> precode -> jumprel64 (NGEN case) -> prestub #ifdef HAS_COMPACT_ENTRYPOINTS if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL) { return TRUE; } #endif if (!IS_ALIGNED(pCode, PRECODE_ALIGNMENT)) { return FALSE; } #ifdef HAS_FIXUP_PRECODE if (*PTR_BYTE(pCode) == X86_INSTR_CALL_REL32) { // Note that call could have been patched to jmp in the meantime pCode = rel32Decode(pCode+1); // JumpStub if (isJumpRel64(pCode)) { pCode = decodeJump64(pCode); } return pCode == (TADDR)PrecodeFixupThunk; } #endif if (*PTR_USHORT(pCode) != X86_INSTR_MOV_R10_IMM64 || // mov rax,XXXX *PTR_BYTE(pCode+10) != X86_INSTR_NOP || // nop *PTR_BYTE(pCode+11) != X86_INSTR_JMP_REL32) // jmp rel32 { return FALSE; } pCode = rel32Decode(pCode+12); // JumpStub if (isJumpRel64(pCode)) { pCode = decodeJump64(pCode); } return pCode == GetPreStubEntryPoint(); } // // Some AMD64 assembly functions have one or more DWORDS at the end of the function // that specify the offsets where significant instructions are // we use this function to get at these offsets // DWORD GetOffsetAtEndOfFunction(ULONGLONG uImageBase, PT_RUNTIME_FUNCTION pFunctionEntry, int offsetNum /* = 1*/) { CONTRACTL { MODE_ANY; NOTHROW; GC_NOTRIGGER; PRECONDITION((offsetNum > 0) && (offsetNum < 20)); /* we only allow reasonable offsetNums 1..19 */ } CONTRACTL_END; DWORD functionSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress; BYTE* pEndOfFunction = (BYTE*) (uImageBase + pFunctionEntry->EndAddress); DWORD* pOffset = (DWORD*) (pEndOfFunction) - offsetNum; DWORD offsetInFunc = *pOffset; _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/cGenAMD64.cpp", (offsetInFunc >= 0) && (offsetInFunc < functionSize)); return offsetInFunc; } #ifdef FEATURE_READYTORUN // // Allocation of dynamic helpers // #define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR) #define BEGIN_DYNAMIC_HELPER_EMIT(size) \ SIZE_T cb = size; \ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \ BYTE * pStart = startWriterHolder.GetRW(); \ size_t rxOffset = pStartRX - pStart; \ BYTE * p = pStart; #define END_DYNAMIC_HELPER_EMIT() \ _ASSERTE(pStart + cb == p); \ while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \ ClrFlushInstructionCache(pStartRX, cbAligned); \ return (PCODE)pStartRX PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { STANDARD_VM_CONTRACT; BEGIN_DYNAMIC_HELPER_EMIT(15); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX #else *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target) { CONTRACTL { GC_NOTRIGGER; PRECONDITION(p != NULL && target != NULL); } CONTRACTL_END; // Move an argument into the second argument register and jump to a target function. 
#ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBE48; // mov rsi, XXXXXX #else *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; } PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(15); EmitHelperWithArg(p, rxOffset, pAllocator, arg, target); END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(25); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX #else *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBE48; // mov rsi, XXXXXX #else *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #endif p += 2; *(TADDR *)p = arg2; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(18); #ifdef UNIX_AMD64_ABI *p++ = 0x48; // mov rsi, rdi *(UINT16 *)p = 0xF78B; #else *p++ = 0x48; // mov rdx, rcx *(UINT16 *)p = 0xD18B; #endif p += 2; #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX #else *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator) { BEGIN_DYNAMIC_HELPER_EMIT(1); *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg) { BEGIN_DYNAMIC_HELPER_EMIT(11); *(UINT16 *)p = 0xB848; // mov rax, XXXXXX p += 2; *(TADDR *)p = arg; p += 8; *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset) { BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 
15 : 11); *(UINT16 *)p = 0xA148; // mov rax, [XXXXXX] p += 2; *(TADDR *)p = arg; p += 8; if (offset != 0) { // add rax, <offset> *p++ = 0x48; *p++ = 0x83; *p++ = 0xC0; *p++ = offset; } *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(15); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #else *(UINT16 *)p = 0xB849; // mov r8, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(25); #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX #else *(UINT16 *)p = 0xB849; // mov r8, XXXXXX #endif p += 2; *(TADDR *)p = arg; p += 8; #ifdef UNIX_AMD64_ABI *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX #else *(UINT16 *)p = 0xB949; // mov r9, XXXXXX #endif p += 2; *(TADDR *)p = arg2; p += 8; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule) { STANDARD_VM_CONTRACT; PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ? GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) : GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule)); GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT); ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs)); argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot; argsWriterHolder.GetRW()->signature = pLookup->signature; argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule; WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*); // It's available only via the run-time helper function if (pLookup->indirections == CORINFO_USEHELPER) { BEGIN_DYNAMIC_HELPER_EMIT(15); // rcx/rdi contains the generic context parameter // mov rdx/rsi,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); END_DYNAMIC_HELPER_EMIT(); } else { int indirectionsSize = 0; for (WORD i = 0; i < pLookup->indirections; i++) indirectionsSize += (pLookup->offsets[i] >= 0x80 ? 7 : 4); int codeSize = indirectionsSize + (pLookup->testForNull ? 21 : 1) + (pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK ? 
13 : 0); BEGIN_DYNAMIC_HELPER_EMIT(codeSize); BYTE* pJLECall = NULL; for (WORD i = 0; i < pLookup->indirections; i++) { if (i == pLookup->indirections - 1 && pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { _ASSERTE(pLookup->testForNull && i > 0); // cmp qword ptr[rax + sizeOffset],slotOffset *(UINT32*)p = 0x00b88148; p += 3; *(UINT32*)p = (UINT32)pLookup->sizeOffset; p += 4; *(UINT32*)p = (UINT32)slotOffset; p += 4; // jle 'HELPER CALL' *p++ = 0x7e; pJLECall = p++; // Offset filled later } if (i == 0) { // Move from rcx|rdi if it's the first indirection, otherwise from rax #ifdef UNIX_AMD64_ABI // mov rax,qword ptr [rdi+offset] if (pLookup->offsets[i] >= 0x80) { *(UINT32*)p = 0x00878b48; p += 3; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT32*)p = 0x00478b48; p += 3; *p++ = (BYTE)pLookup->offsets[i]; } #else // mov rax,qword ptr [rcx+offset] if (pLookup->offsets[i] >= 0x80) { *(UINT32*)p = 0x00818b48; p += 3; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT32*)p = 0x00418b48; p += 3; *p++ = (BYTE)pLookup->offsets[i]; } #endif } else { // mov rax,qword ptr [rax+offset] if (pLookup->offsets[i] >= 0x80) { *(UINT32*)p = 0x00808b48; p += 3; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT32*)p = 0x00408b48; p += 3; *p++ = (BYTE)pLookup->offsets[i]; } } } // No null test required if (!pLookup->testForNull) { _ASSERTE(pLookup->sizeOffset == CORINFO_NO_SIZE_CHECK); // No fixups needed for R2R *p++ = 0xC3; // ret } else { // rcx/rdi contains the value of the dictionary slot entry _ASSERTE(pLookup->indirections != 0); *(UINT32*)p = 0x00c08548; p += 3; // test rax,rax // je 'HELPER_CALL' (a jump of 1 byte) *(UINT16*)p = 0x0174; p += 2; *p++ = 0xC3; // ret // 'HELPER_CALL' { if (pJLECall != NULL) *pJLECall = (BYTE)(p - pJLECall - 1); // rcx|rdi already contains the generic context parameter // mov rdx|rsi,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); } } END_DYNAMIC_HELPER_EMIT(); } } #endif // FEATURE_READYTORUN #endif // DACCESS_COMPILE
-1
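The cgenamd64.cpp content recorded above centers on rel32UsingJumpStub: an AMD64 rel32 call or jump site encodes only a signed 32-bit displacement measured from the end of the 4-byte field (hence baseAddr = pRel32 + 4), so when the target lies outside that +/-2GB window the runtime points the site at a jump stub allocated within range and lets the stub carry the full 64-bit target. Below is a minimal standalone sketch of just the range check, under assumed names (FitsInRel32 is illustrative, not a CoreCLR API; the real code also handles the reserved-space fallback described in the comments above):

// Sketch (assumed helper, not CoreCLR code): can a direct rel32
// displacement reach `target` from a rel32 field at `rel32FieldAddr`?
#include <cstdint>
#include <cstdio>

static bool FitsInRel32(uint64_t rel32FieldAddr, uint64_t target)
{
    uint64_t baseAddr = rel32FieldAddr + 4;           // end of the rel32 field
    int64_t  offset   = (int64_t)(target - baseAddr); // same wraparound as the runtime's INT_PTR math
    return offset >= INT32_MIN && offset <= INT32_MAX;
}

int main()
{
    uint64_t site = 0x00007ff600001000;
    printf("near target fits: %d\n", FitsInRel32(site, site + 0x1000));        // 1: jmp rel32 directly
    printf("far target fits:  %d\n", FitsInRel32(site, site + (1ull << 33)));  // 0: needs a jump stub
}

The DynamicHelpers emitters in the same file depend on exactly this computation: every jmp rel32 they write goes through rel32UsingJumpStub, so a stub is materialized whenever the helper target is out of range.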
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/inc/rt/commdlg.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: commdlg.h // // =========================================================================== // dummy commdlg.h for PAL #include "palrt.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: commdlg.h // // =========================================================================== // dummy commdlg.h for PAL #include "palrt.h"
-1
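This row's PR fixes early stack offset computation on ARM32, where parameters requiring 8-byte alignment (and split register/stack parameters) could be assigned wrong offsets when alignment was ignored. As a hedged illustration of the failure mode only (the names and the simplified layout rule here are assumptions, not the JIT's actual frame-offset logic):

// Simplified sketch (assumed): assigning stack offsets on a 32-bit
// target where 8-byte types must start on an 8-byte boundary. The
// "naive" column shows the kind of misaligned offset the PR corrects.
#include <cstdio>

static unsigned AlignUp(unsigned value, unsigned alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

struct Param { const char* name; unsigned size; unsigned alignment; };

int main()
{
    Param params[] = { {"int", 4, 4}, {"double", 8, 8}, {"int", 4, 4} };
    unsigned naive = 0, aligned = 0;
    for (const Param& p : params)
    {
        unsigned alignedOfs = AlignUp(aligned, p.alignment);
        printf("%-6s naive=%2u aligned=%2u\n", p.name, naive, alignedOfs);
        naive   += p.size;              // double lands at offset 4: misaligned
        aligned  = alignedOfs + p.size; // double lands at offset 8: correct
    }
}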
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/codeversion.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: CodeVersion.h // // =========================================================================== #ifndef CODE_VERSION_H #define CODE_VERSION_H class ILCodeVersion; typedef DWORD NativeCodeVersionId; #ifdef FEATURE_CODE_VERSIONING class NativeCodeVersionNode; typedef DPTR(class NativeCodeVersionNode) PTR_NativeCodeVersionNode; class NativeCodeVersionCollection; class NativeCodeVersionIterator; class ILCodeVersionNode; typedef DPTR(class ILCodeVersionNode) PTR_ILCodeVersionNode; class ILCodeVersionCollection; class ILCodeVersionIterator; class MethodDescVersioningState; typedef DPTR(class MethodDescVersioningState) PTR_MethodDescVersioningState; class ILCodeVersioningState; typedef DPTR(class ILCodeVersioningState) PTR_ILCodeVersioningState; class CodeVersionManager; typedef DPTR(class CodeVersionManager) PTR_CodeVersionManager; #endif #ifdef HAVE_GCCOVER class GCCoverageInfo; typedef DPTR(class GCCoverageInfo) PTR_GCCoverageInfo; #endif #ifdef FEATURE_ON_STACK_REPLACEMENT struct PatchpointInfo; typedef DPTR(struct PatchpointInfo) PTR_PatchpointInfo; #endif class NativeCodeVersion { #ifdef FEATURE_CODE_VERSIONING friend class MethodDescVersioningState; friend class ILCodeVersion; #endif public: NativeCodeVersion(); NativeCodeVersion(const NativeCodeVersion & rhs); #ifdef FEATURE_CODE_VERSIONING NativeCodeVersion(PTR_NativeCodeVersionNode pVersionNode); #endif explicit NativeCodeVersion(PTR_MethodDesc pMethod); BOOL IsNull() const; PTR_MethodDesc GetMethodDesc() const; NativeCodeVersionId GetVersionId() const; BOOL IsDefaultVersion() const; PCODE GetNativeCode() const; #ifdef FEATURE_CODE_VERSIONING ILCodeVersion GetILCodeVersion() const; ReJITID GetILCodeVersionId() const; #endif #ifndef DACCESS_COMPILE BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected = NULL); #endif enum OptimizationTier { OptimizationTier0, OptimizationTier1, OptimizationTier1OSR, OptimizationTierOptimized, // may do less optimizations than tier 1 }; #ifdef FEATURE_TIERED_COMPILATION OptimizationTier GetOptimizationTier() const; #ifndef DACCESS_COMPILE void SetOptimizationTier(OptimizationTier tier); #endif #endif // FEATURE_TIERED_COMPILATION #ifdef FEATURE_ON_STACK_REPLACEMENT PatchpointInfo * GetOSRInfo(unsigned * iloffset); #endif // FEATURE_ON_STACK_REPLACEMENT #ifdef HAVE_GCCOVER PTR_GCCoverageInfo GetGCCoverageInfo() const; void SetGCCoverageInfo(PTR_GCCoverageInfo gcCover); #endif bool operator==(const NativeCodeVersion & rhs) const; bool operator!=(const NativeCodeVersion & rhs) const; #if defined(DACCESS_COMPILE) && defined(FEATURE_CODE_VERSIONING) // The DAC is privy to the backing node abstraction PTR_NativeCodeVersionNode AsNode() const; #endif private: #ifndef FEATURE_CODE_VERSIONING PTR_MethodDesc m_pMethodDesc; #else // FEATURE_CODE_VERSIONING #ifndef DACCESS_COMPILE NativeCodeVersionNode* AsNode() const; NativeCodeVersionNode* AsNode(); void SetActiveChildFlag(BOOL isActive); MethodDescVersioningState* GetMethodDescVersioningState(); #endif BOOL IsActiveChildVersion() const; PTR_MethodDescVersioningState GetMethodDescVersioningState() const; enum StorageKind { Unknown, Explicit, Synthetic }; StorageKind m_storageKind; union { PTR_NativeCodeVersionNode m_pVersionNode; struct { PTR_MethodDesc m_pMethodDesc; } m_synthetic; }; #endif // FEATURE_CODE_VERSIONING }; #ifdef 
FEATURE_CODE_VERSIONING class ILCodeVersion { friend class NativeCodeVersionIterator; public: ILCodeVersion(); ILCodeVersion(const ILCodeVersion & ilCodeVersion); ILCodeVersion(PTR_ILCodeVersionNode pILCodeVersionNode); ILCodeVersion(PTR_Module pModule, mdMethodDef methodDef); bool operator==(const ILCodeVersion & rhs) const; bool operator!=(const ILCodeVersion & rhs) const; BOOL HasDefaultIL() const; BOOL IsNull() const; BOOL IsDefaultVersion() const; PTR_Module GetModule() const; mdMethodDef GetMethodDef() const; ReJITID GetVersionId() const; NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pClosedMethodDesc) const; NativeCodeVersion GetActiveNativeCodeVersion(PTR_MethodDesc pClosedMethodDesc) const; #if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE) bool HasAnyOptimizedNativeCodeVersion(NativeCodeVersion tier0NativeCodeVersion) const; #endif PTR_COR_ILMETHOD GetIL() const; DWORD GetJitFlags() const; const InstrumentedILOffsetMapping* GetInstrumentedILMap() const; #ifndef DACCESS_COMPILE void SetIL(COR_ILMETHOD* pIL); void SetJitFlags(DWORD flags); void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap); HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion, PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0); HRESULT GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion); HRESULT SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion); #endif //DACCESS_COMPILE enum RejitFlags { // The profiler has requested a ReJit, so we've allocated stuff, but we haven't // called back to the profiler to get any info or indicate that the ReJit has // started. (This Info can be 'reused' for a new ReJit if the // profiler calls RequestRejit again before we transition to the next state.) kStateRequested = 0x00000000, // The CLR has initiated the call to the profiler's GetReJITParameters() callback // but it hasn't completed yet. At this point we have to assume the profiler has // commited to a specific IL body, even if the CLR doesn't know what it is yet. // If the profiler calls RequestRejit we need to allocate a new ILCodeVersion // and call GetReJITParameters() again. kStateGettingReJITParameters = 0x00000001, // We have asked the profiler about this method via ICorProfilerFunctionControl, // and have thus stored the IL and codegen flags the profiler specified. kStateActive = 0x00000002, kStateMask = 0x0000000F, // Indicates that the method being ReJITted is an inliner of the actual // ReJIT request and we should not issue the GetReJITParameters for this // method. 
kSuppressParams = 0x80000000 }; RejitFlags GetRejitState() const; BOOL GetEnableReJITCallback() const; #ifndef DACCESS_COMPILE void SetRejitState(RejitFlags newState); void SetEnableReJITCallback(BOOL state); #endif #ifdef DACCESS_COMPILE // The DAC is privy to the backing node abstraction PTR_ILCodeVersionNode AsNode() const; #endif private: #ifndef DACCESS_COMPILE PTR_ILCodeVersionNode AsNode(); PTR_ILCodeVersionNode AsNode() const; #endif enum StorageKind { Unknown, Explicit, Synthetic }; StorageKind m_storageKind; union { PTR_ILCodeVersionNode m_pVersionNode; struct { PTR_Module m_pModule; mdMethodDef m_methodDef; } m_synthetic; }; }; class NativeCodeVersionNode { friend NativeCodeVersionIterator; friend MethodDescVersioningState; friend ILCodeVersionNode; public: #ifndef DACCESS_COMPILE NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId, NativeCodeVersion::OptimizationTier optimizationTier, PatchpointInfo* patchpointInfo, unsigned ilOffset); #endif PTR_MethodDesc GetMethodDesc() const; NativeCodeVersionId GetVersionId() const; PCODE GetNativeCode() const; ReJITID GetILVersionId() const; ILCodeVersion GetILCodeVersion() const; BOOL IsActiveChildVersion() const; #ifndef DACCESS_COMPILE BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected); void SetActiveChildFlag(BOOL isActive); #endif #ifdef FEATURE_TIERED_COMPILATION NativeCodeVersion::OptimizationTier GetOptimizationTier() const; #ifndef DACCESS_COMPILE void SetOptimizationTier(NativeCodeVersion::OptimizationTier tier); #endif #endif // FEATURE_TIERED_COMPILATION #ifdef HAVE_GCCOVER PTR_GCCoverageInfo GetGCCoverageInfo() const; void SetGCCoverageInfo(PTR_GCCoverageInfo gcCover); #endif #ifdef FEATURE_ON_STACK_REPLACEMENT PatchpointInfo * GetOSRInfo(unsigned * ilOffset); #endif private: //union - could save a little memory? 
//{ PCODE m_pNativeCode; PTR_MethodDesc m_pMethodDesc; //}; ReJITID m_parentId; PTR_NativeCodeVersionNode m_pNextMethodDescSibling; NativeCodeVersionId m_id; #ifdef FEATURE_TIERED_COMPILATION NativeCodeVersion::OptimizationTier m_optTier; #endif #ifdef HAVE_GCCOVER PTR_GCCoverageInfo m_gcCover; #endif #ifdef FEATURE_ON_STACK_REPLACEMENT PTR_PatchpointInfo m_patchpointInfo; unsigned m_ilOffset; #endif enum NativeCodeVersionNodeFlags { IsActiveChildFlag = 1 }; DWORD m_flags; }; class NativeCodeVersionCollection { friend class NativeCodeVersionIterator; public: NativeCodeVersionCollection(PTR_MethodDesc pMethodDescFilter, ILCodeVersion ilCodeFilter); NativeCodeVersionIterator Begin(); NativeCodeVersionIterator End(); private: PTR_MethodDesc m_pMethodDescFilter; ILCodeVersion m_ilCodeFilter; }; class NativeCodeVersionIterator : public Enumerator<const NativeCodeVersion, NativeCodeVersionIterator> { friend class Enumerator<const NativeCodeVersion, NativeCodeVersionIterator>; public: NativeCodeVersionIterator(NativeCodeVersionCollection* pCollection); CHECK Check() const { CHECK_OK; } protected: const NativeCodeVersion & Get() const; void First(); void Next(); bool Equal(const NativeCodeVersionIterator &i) const; CHECK DoCheck() const { CHECK_OK; } private: enum IterationStage { Initial, ImplicitCodeVersion, LinkedList, End }; IterationStage m_stage; NativeCodeVersionCollection* m_pCollection; PTR_NativeCodeVersionNode m_pLinkedListCur; NativeCodeVersion m_cur; }; class ILCodeVersionNode { public: ILCodeVersionNode(); #ifndef DACCESS_COMPILE ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id); #endif PTR_Module GetModule() const; mdMethodDef GetMethodDef() const; ReJITID GetVersionId() const; PTR_COR_ILMETHOD GetIL() const; DWORD GetJitFlags() const; const InstrumentedILOffsetMapping* GetInstrumentedILMap() const; ILCodeVersion::RejitFlags GetRejitState() const; BOOL GetEnableReJITCallback() const; PTR_ILCodeVersionNode GetNextILVersionNode() const; #ifndef DACCESS_COMPILE void SetIL(COR_ILMETHOD* pIL); void SetJitFlags(DWORD flags); void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap); void SetRejitState(ILCodeVersion::RejitFlags newState); void SetEnableReJITCallback(BOOL state); void SetNextILVersionNode(ILCodeVersionNode* pNextVersionNode); #endif private: PTR_Module m_pModule; mdMethodDef m_methodDef; ReJITID m_rejitId; PTR_ILCodeVersionNode m_pNextILVersionNode; Volatile<ILCodeVersion::RejitFlags> m_rejitState; VolatilePtr<COR_ILMETHOD, PTR_COR_ILMETHOD> m_pIL; Volatile<DWORD> m_jitFlags; InstrumentedILOffsetMapping m_instrumentedILMap; }; class ILCodeVersionCollection { friend class ILCodeVersionIterator; public: ILCodeVersionCollection(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersionIterator Begin(); ILCodeVersionIterator End(); private: PTR_Module m_pModule; mdMethodDef m_methodDef; }; class ILCodeVersionIterator : public Enumerator<const ILCodeVersion, ILCodeVersionIterator> { friend class Enumerator<const ILCodeVersion, ILCodeVersionIterator>; public: ILCodeVersionIterator(); ILCodeVersionIterator(const ILCodeVersionIterator & iter); ILCodeVersionIterator(ILCodeVersionCollection* pCollection); CHECK Check() const { CHECK_OK; } protected: const ILCodeVersion & Get() const; void First(); void Next(); bool Equal(const ILCodeVersionIterator &i) const; CHECK DoCheck() const { CHECK_OK; } private: enum IterationStage { Initial, ImplicitCodeVersion, LinkedList, End }; IterationStage m_stage; ILCodeVersion m_cur; PTR_ILCodeVersionNode m_pLinkedListCur; 
ILCodeVersionCollection* m_pCollection; }; class MethodDescVersioningState { public: MethodDescVersioningState(PTR_MethodDesc pMethodDesc); PTR_MethodDesc GetMethodDesc() const; NativeCodeVersionId AllocateVersionId(); PTR_NativeCodeVersionNode GetFirstVersionNode() const; #ifndef DACCESS_COMPILE void LinkNativeCodeVersionNode(NativeCodeVersionNode* pNativeCodeVersionNode); #endif // DACCESS_COMPILE //read-write data for the default native code version BOOL IsDefaultVersionActiveChild() const; #ifndef DACCESS_COMPILE void SetDefaultVersionActiveChildFlag(BOOL isActive); #endif private: PTR_MethodDesc m_pMethodDesc; enum MethodDescVersioningStateFlags { IsDefaultVersionActiveChildFlag = 0x4 }; BYTE m_flags; NativeCodeVersionId m_nextId; PTR_NativeCodeVersionNode m_pFirstVersionNode; }; class MethodDescVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_MethodDescVersioningState>> { public: typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::element_t element_t; typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::count_t count_t; typedef const PTR_MethodDesc key_t; static key_t GetKey(element_t e) { LIMITED_METHOD_CONTRACT; return e->GetMethodDesc(); } static BOOL Equals(key_t k1, key_t k2) { LIMITED_METHOD_CONTRACT; return k1 == k2; } static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; return (count_t)dac_cast<TADDR>(k); } static element_t Null() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_MethodDescVersioningState>(nullptr); } static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; } }; typedef SHash<MethodDescVersioningStateHashTraits> MethodDescVersioningStateHash; class ILCodeVersioningState { public: ILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersion GetActiveVersion() const; PTR_ILCodeVersionNode GetFirstVersionNode() const; #ifndef DACCESS_COMPILE void SetActiveVersion(ILCodeVersion ilActiveCodeVersion); void LinkILCodeVersionNode(ILCodeVersionNode* pILCodeVersionNode); #endif struct Key { public: Key(); Key(PTR_Module pModule, mdMethodDef methodDef); size_t Hash() const; bool operator==(const Key & rhs) const; private: PTR_Module m_pModule; mdMethodDef m_methodDef; }; Key GetKey() const; private: ILCodeVersion m_activeVersion; PTR_ILCodeVersionNode m_pFirstVersionNode; PTR_Module m_pModule; mdMethodDef m_methodDef; }; class ILCodeVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_ILCodeVersioningState>> { public: typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::element_t element_t; typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::count_t count_t; typedef const ILCodeVersioningState::Key key_t; static key_t GetKey(element_t e) { LIMITED_METHOD_CONTRACT; return e->GetKey(); } static BOOL Equals(key_t k1, key_t k2) { LIMITED_METHOD_CONTRACT; return k1 == k2; } static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; return (count_t)k.Hash(); } static element_t Null() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_ILCodeVersioningState>(nullptr); } static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; } }; typedef SHash<ILCodeVersioningStateHashTraits> ILCodeVersioningStateHash; class CodeVersionManager { friend class ILCodeVersion; public: CodeVersionManager(); DWORD GetNonDefaultILVersionCount(); ILCodeVersionCollection GetILCodeVersions(PTR_MethodDesc pMethod); ILCodeVersionCollection GetILCodeVersions(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersion 
GetActiveILCodeVersion(PTR_MethodDesc pMethod); ILCodeVersion GetActiveILCodeVersion(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersion GetILCodeVersion(PTR_MethodDesc pMethod, ReJITID rejitId); NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pMethod) const; NativeCodeVersion GetNativeCodeVersion(PTR_MethodDesc pMethod, PCODE codeStartAddress) const; PTR_ILCodeVersioningState GetILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) const; PTR_MethodDescVersioningState GetMethodDescVersioningState(PTR_MethodDesc pMethod) const; #ifndef DACCESS_COMPILE struct CodePublishError { Module* pModule; mdMethodDef methodDef; MethodDesc* pMethodDesc; HRESULT hrStatus; }; HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion); HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion, PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0); PCODE PublishVersionableCodeIfNecessary( MethodDesc* pMethodDesc, CallerGCMode callerGCMode, bool *doBackpatchRef, bool *doFullBackpatchRef); HRESULT PublishNativeCodeVersion(MethodDesc* pMethodDesc, NativeCodeVersion nativeCodeVersion); HRESULT GetOrCreateMethodDescVersioningState(MethodDesc* pMethod, MethodDescVersioningState** ppMethodDescVersioningState); HRESULT GetOrCreateILCodeVersioningState(Module* pModule, mdMethodDef methodDef, ILCodeVersioningState** ppILCodeVersioningState); HRESULT SetActiveILCodeVersions(ILCodeVersion* pActiveVersions, DWORD cActiveVersions, CDynArray<CodePublishError> * pPublishErrors); static HRESULT AddCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors); static HRESULT AddCodePublishError(NativeCodeVersion nativeCodeVersion, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors); static void OnAppDomainExit(AppDomain* pAppDomain); #endif static bool IsMethodSupported(PTR_MethodDesc pMethodDesc); #ifndef DACCESS_COMPILE static bool InitialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion() { LIMITED_METHOD_CONTRACT; return s_initialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion; } static void SetInitialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion() { LIMITED_METHOD_CONTRACT; s_initialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion = true; } #endif private: #ifndef DACCESS_COMPILE static HRESULT EnumerateClosedMethodDescs(MethodDesc* pMD, CDynArray<MethodDesc*> * pClosedMethodDescs, CDynArray<CodePublishError> * pUnsupportedMethodErrors); static HRESULT EnumerateDomainClosedMethodDescs( AppDomain * pAppDomainToSearch, Module* pModuleContainingMethodDef, mdMethodDef methodDef, CDynArray<MethodDesc*> * pClosedMethodDescs, CDynArray<CodePublishError> * pUnsupportedMethodErrors); static HRESULT GetNonVersionableError(MethodDesc* pMD); void ReportCodePublishError(CodePublishError* pErrorRecord); void ReportCodePublishError(MethodDesc* pMD, HRESULT hrStatus); void ReportCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus); static bool s_initialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion; #endif //Module,MethodDef -> ILCodeVersioningState ILCodeVersioningStateHash m_ilCodeVersioningStateMap; //closed MethodDesc -> MethodDescVersioningState MethodDescVersioningStateHash m_methodDescVersioningStateMap; private: static CrstStatic s_lock; #ifndef DACCESS_COMPILE public: static 
void StaticInitialize() { WRAPPER_NO_CONTRACT; s_lock.Init( CrstCodeVersioning, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN)); } #endif #ifdef _DEBUG public: static bool IsLockOwnedByCurrentThread(); #endif public: class LockHolder : private CrstHolderWithState { public: LockHolder() #ifndef DACCESS_COMPILE : CrstHolderWithState(&s_lock) #else : CrstHolderWithState(nullptr) #endif { WRAPPER_NO_CONTRACT; } LockHolder(const LockHolder &) = delete; LockHolder &operator =(const LockHolder &) = delete; }; }; #endif // FEATURE_CODE_VERSIONING //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NativeCodeVersion definitions inline NativeCodeVersion::NativeCodeVersion() #ifdef FEATURE_CODE_VERSIONING : m_storageKind(StorageKind::Unknown), m_pVersionNode(PTR_NULL) #else : m_pMethodDesc(PTR_NULL) #endif { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING static_assert_no_msg(sizeof(m_pVersionNode) == sizeof(m_synthetic)); #endif } inline NativeCodeVersion::NativeCodeVersion(const NativeCodeVersion & rhs) #ifdef FEATURE_CODE_VERSIONING : m_storageKind(rhs.m_storageKind), m_pVersionNode(rhs.m_pVersionNode) #else : m_pMethodDesc(rhs.m_pMethodDesc) #endif { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING static_assert_no_msg(sizeof(m_pVersionNode) == sizeof(m_synthetic)); #endif } inline BOOL NativeCodeVersion::IsNull() const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING return m_storageKind == StorageKind::Unknown; #else return m_pMethodDesc == NULL; #endif } inline PTR_MethodDesc NativeCodeVersion::GetMethodDesc() const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING return m_storageKind == StorageKind::Explicit ? m_pVersionNode->GetMethodDesc() : m_synthetic.m_pMethodDesc; #else return m_pMethodDesc; #endif } inline NativeCodeVersionId NativeCodeVersion::GetVersionId() const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING if (m_storageKind == StorageKind::Explicit) { return m_pVersionNode->GetVersionId(); } #endif return 0; } inline bool NativeCodeVersion::operator==(const NativeCodeVersion & rhs) const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING static_assert_no_msg(sizeof(m_pVersionNode) == sizeof(m_synthetic)); return m_storageKind == rhs.m_storageKind && m_pVersionNode == rhs.m_pVersionNode; #else return m_pMethodDesc == rhs.m_pMethodDesc; #endif } inline bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const { WRAPPER_NO_CONTRACT; return !operator==(rhs); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NativeCodeVersionNode definitions #ifdef FEATURE_CODE_VERSIONING inline PTR_MethodDesc NativeCodeVersionNode::GetMethodDesc() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMethodDesc; } #endif // FEATURE_CODE_VERSIONING #endif // CODE_VERSION_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: CodeVersion.h // // =========================================================================== #ifndef CODE_VERSION_H #define CODE_VERSION_H class ILCodeVersion; typedef DWORD NativeCodeVersionId; #ifdef FEATURE_CODE_VERSIONING class NativeCodeVersionNode; typedef DPTR(class NativeCodeVersionNode) PTR_NativeCodeVersionNode; class NativeCodeVersionCollection; class NativeCodeVersionIterator; class ILCodeVersionNode; typedef DPTR(class ILCodeVersionNode) PTR_ILCodeVersionNode; class ILCodeVersionCollection; class ILCodeVersionIterator; class MethodDescVersioningState; typedef DPTR(class MethodDescVersioningState) PTR_MethodDescVersioningState; class ILCodeVersioningState; typedef DPTR(class ILCodeVersioningState) PTR_ILCodeVersioningState; class CodeVersionManager; typedef DPTR(class CodeVersionManager) PTR_CodeVersionManager; #endif #ifdef HAVE_GCCOVER class GCCoverageInfo; typedef DPTR(class GCCoverageInfo) PTR_GCCoverageInfo; #endif #ifdef FEATURE_ON_STACK_REPLACEMENT struct PatchpointInfo; typedef DPTR(struct PatchpointInfo) PTR_PatchpointInfo; #endif class NativeCodeVersion { #ifdef FEATURE_CODE_VERSIONING friend class MethodDescVersioningState; friend class ILCodeVersion; #endif public: NativeCodeVersion(); NativeCodeVersion(const NativeCodeVersion & rhs); #ifdef FEATURE_CODE_VERSIONING NativeCodeVersion(PTR_NativeCodeVersionNode pVersionNode); #endif explicit NativeCodeVersion(PTR_MethodDesc pMethod); BOOL IsNull() const; PTR_MethodDesc GetMethodDesc() const; NativeCodeVersionId GetVersionId() const; BOOL IsDefaultVersion() const; PCODE GetNativeCode() const; #ifdef FEATURE_CODE_VERSIONING ILCodeVersion GetILCodeVersion() const; ReJITID GetILCodeVersionId() const; #endif #ifndef DACCESS_COMPILE BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected = NULL); #endif enum OptimizationTier { OptimizationTier0, OptimizationTier1, OptimizationTier1OSR, OptimizationTierOptimized, // may do less optimizations than tier 1 }; #ifdef FEATURE_TIERED_COMPILATION OptimizationTier GetOptimizationTier() const; #ifndef DACCESS_COMPILE void SetOptimizationTier(OptimizationTier tier); #endif #endif // FEATURE_TIERED_COMPILATION #ifdef FEATURE_ON_STACK_REPLACEMENT PatchpointInfo * GetOSRInfo(unsigned * iloffset); #endif // FEATURE_ON_STACK_REPLACEMENT #ifdef HAVE_GCCOVER PTR_GCCoverageInfo GetGCCoverageInfo() const; void SetGCCoverageInfo(PTR_GCCoverageInfo gcCover); #endif bool operator==(const NativeCodeVersion & rhs) const; bool operator!=(const NativeCodeVersion & rhs) const; #if defined(DACCESS_COMPILE) && defined(FEATURE_CODE_VERSIONING) // The DAC is privy to the backing node abstraction PTR_NativeCodeVersionNode AsNode() const; #endif private: #ifndef FEATURE_CODE_VERSIONING PTR_MethodDesc m_pMethodDesc; #else // FEATURE_CODE_VERSIONING #ifndef DACCESS_COMPILE NativeCodeVersionNode* AsNode() const; NativeCodeVersionNode* AsNode(); void SetActiveChildFlag(BOOL isActive); MethodDescVersioningState* GetMethodDescVersioningState(); #endif BOOL IsActiveChildVersion() const; PTR_MethodDescVersioningState GetMethodDescVersioningState() const; enum StorageKind { Unknown, Explicit, Synthetic }; StorageKind m_storageKind; union { PTR_NativeCodeVersionNode m_pVersionNode; struct { PTR_MethodDesc m_pMethodDesc; } m_synthetic; }; #endif // FEATURE_CODE_VERSIONING }; #ifdef 
FEATURE_CODE_VERSIONING class ILCodeVersion { friend class NativeCodeVersionIterator; public: ILCodeVersion(); ILCodeVersion(const ILCodeVersion & ilCodeVersion); ILCodeVersion(PTR_ILCodeVersionNode pILCodeVersionNode); ILCodeVersion(PTR_Module pModule, mdMethodDef methodDef); bool operator==(const ILCodeVersion & rhs) const; bool operator!=(const ILCodeVersion & rhs) const; BOOL HasDefaultIL() const; BOOL IsNull() const; BOOL IsDefaultVersion() const; PTR_Module GetModule() const; mdMethodDef GetMethodDef() const; ReJITID GetVersionId() const; NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pClosedMethodDesc) const; NativeCodeVersion GetActiveNativeCodeVersion(PTR_MethodDesc pClosedMethodDesc) const; #if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE) bool HasAnyOptimizedNativeCodeVersion(NativeCodeVersion tier0NativeCodeVersion) const; #endif PTR_COR_ILMETHOD GetIL() const; DWORD GetJitFlags() const; const InstrumentedILOffsetMapping* GetInstrumentedILMap() const; #ifndef DACCESS_COMPILE void SetIL(COR_ILMETHOD* pIL); void SetJitFlags(DWORD flags); void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap); HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion, PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0); HRESULT GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion); HRESULT SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion); #endif //DACCESS_COMPILE enum RejitFlags { // The profiler has requested a ReJit, so we've allocated stuff, but we haven't // called back to the profiler to get any info or indicate that the ReJit has // started. (This Info can be 'reused' for a new ReJit if the // profiler calls RequestRejit again before we transition to the next state.) kStateRequested = 0x00000000, // The CLR has initiated the call to the profiler's GetReJITParameters() callback // but it hasn't completed yet. At this point we have to assume the profiler has // commited to a specific IL body, even if the CLR doesn't know what it is yet. // If the profiler calls RequestRejit we need to allocate a new ILCodeVersion // and call GetReJITParameters() again. kStateGettingReJITParameters = 0x00000001, // We have asked the profiler about this method via ICorProfilerFunctionControl, // and have thus stored the IL and codegen flags the profiler specified. kStateActive = 0x00000002, kStateMask = 0x0000000F, // Indicates that the method being ReJITted is an inliner of the actual // ReJIT request and we should not issue the GetReJITParameters for this // method. 
kSuppressParams = 0x80000000 }; RejitFlags GetRejitState() const; BOOL GetEnableReJITCallback() const; #ifndef DACCESS_COMPILE void SetRejitState(RejitFlags newState); void SetEnableReJITCallback(BOOL state); #endif #ifdef DACCESS_COMPILE // The DAC is privy to the backing node abstraction PTR_ILCodeVersionNode AsNode() const; #endif private: #ifndef DACCESS_COMPILE PTR_ILCodeVersionNode AsNode(); PTR_ILCodeVersionNode AsNode() const; #endif enum StorageKind { Unknown, Explicit, Synthetic }; StorageKind m_storageKind; union { PTR_ILCodeVersionNode m_pVersionNode; struct { PTR_Module m_pModule; mdMethodDef m_methodDef; } m_synthetic; }; }; class NativeCodeVersionNode { friend NativeCodeVersionIterator; friend MethodDescVersioningState; friend ILCodeVersionNode; public: #ifndef DACCESS_COMPILE NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId, NativeCodeVersion::OptimizationTier optimizationTier, PatchpointInfo* patchpointInfo, unsigned ilOffset); #endif PTR_MethodDesc GetMethodDesc() const; NativeCodeVersionId GetVersionId() const; PCODE GetNativeCode() const; ReJITID GetILVersionId() const; ILCodeVersion GetILCodeVersion() const; BOOL IsActiveChildVersion() const; #ifndef DACCESS_COMPILE BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected); void SetActiveChildFlag(BOOL isActive); #endif #ifdef FEATURE_TIERED_COMPILATION NativeCodeVersion::OptimizationTier GetOptimizationTier() const; #ifndef DACCESS_COMPILE void SetOptimizationTier(NativeCodeVersion::OptimizationTier tier); #endif #endif // FEATURE_TIERED_COMPILATION #ifdef HAVE_GCCOVER PTR_GCCoverageInfo GetGCCoverageInfo() const; void SetGCCoverageInfo(PTR_GCCoverageInfo gcCover); #endif #ifdef FEATURE_ON_STACK_REPLACEMENT PatchpointInfo * GetOSRInfo(unsigned * ilOffset); #endif private: //union - could save a little memory? 
//{ PCODE m_pNativeCode; PTR_MethodDesc m_pMethodDesc; //}; ReJITID m_parentId; PTR_NativeCodeVersionNode m_pNextMethodDescSibling; NativeCodeVersionId m_id; #ifdef FEATURE_TIERED_COMPILATION NativeCodeVersion::OptimizationTier m_optTier; #endif #ifdef HAVE_GCCOVER PTR_GCCoverageInfo m_gcCover; #endif #ifdef FEATURE_ON_STACK_REPLACEMENT PTR_PatchpointInfo m_patchpointInfo; unsigned m_ilOffset; #endif enum NativeCodeVersionNodeFlags { IsActiveChildFlag = 1 }; DWORD m_flags; }; class NativeCodeVersionCollection { friend class NativeCodeVersionIterator; public: NativeCodeVersionCollection(PTR_MethodDesc pMethodDescFilter, ILCodeVersion ilCodeFilter); NativeCodeVersionIterator Begin(); NativeCodeVersionIterator End(); private: PTR_MethodDesc m_pMethodDescFilter; ILCodeVersion m_ilCodeFilter; }; class NativeCodeVersionIterator : public Enumerator<const NativeCodeVersion, NativeCodeVersionIterator> { friend class Enumerator<const NativeCodeVersion, NativeCodeVersionIterator>; public: NativeCodeVersionIterator(NativeCodeVersionCollection* pCollection); CHECK Check() const { CHECK_OK; } protected: const NativeCodeVersion & Get() const; void First(); void Next(); bool Equal(const NativeCodeVersionIterator &i) const; CHECK DoCheck() const { CHECK_OK; } private: enum IterationStage { Initial, ImplicitCodeVersion, LinkedList, End }; IterationStage m_stage; NativeCodeVersionCollection* m_pCollection; PTR_NativeCodeVersionNode m_pLinkedListCur; NativeCodeVersion m_cur; }; class ILCodeVersionNode { public: ILCodeVersionNode(); #ifndef DACCESS_COMPILE ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id); #endif PTR_Module GetModule() const; mdMethodDef GetMethodDef() const; ReJITID GetVersionId() const; PTR_COR_ILMETHOD GetIL() const; DWORD GetJitFlags() const; const InstrumentedILOffsetMapping* GetInstrumentedILMap() const; ILCodeVersion::RejitFlags GetRejitState() const; BOOL GetEnableReJITCallback() const; PTR_ILCodeVersionNode GetNextILVersionNode() const; #ifndef DACCESS_COMPILE void SetIL(COR_ILMETHOD* pIL); void SetJitFlags(DWORD flags); void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap); void SetRejitState(ILCodeVersion::RejitFlags newState); void SetEnableReJITCallback(BOOL state); void SetNextILVersionNode(ILCodeVersionNode* pNextVersionNode); #endif private: PTR_Module m_pModule; mdMethodDef m_methodDef; ReJITID m_rejitId; PTR_ILCodeVersionNode m_pNextILVersionNode; Volatile<ILCodeVersion::RejitFlags> m_rejitState; VolatilePtr<COR_ILMETHOD, PTR_COR_ILMETHOD> m_pIL; Volatile<DWORD> m_jitFlags; InstrumentedILOffsetMapping m_instrumentedILMap; }; class ILCodeVersionCollection { friend class ILCodeVersionIterator; public: ILCodeVersionCollection(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersionIterator Begin(); ILCodeVersionIterator End(); private: PTR_Module m_pModule; mdMethodDef m_methodDef; }; class ILCodeVersionIterator : public Enumerator<const ILCodeVersion, ILCodeVersionIterator> { friend class Enumerator<const ILCodeVersion, ILCodeVersionIterator>; public: ILCodeVersionIterator(); ILCodeVersionIterator(const ILCodeVersionIterator & iter); ILCodeVersionIterator(ILCodeVersionCollection* pCollection); CHECK Check() const { CHECK_OK; } protected: const ILCodeVersion & Get() const; void First(); void Next(); bool Equal(const ILCodeVersionIterator &i) const; CHECK DoCheck() const { CHECK_OK; } private: enum IterationStage { Initial, ImplicitCodeVersion, LinkedList, End }; IterationStage m_stage; ILCodeVersion m_cur; PTR_ILCodeVersionNode m_pLinkedListCur; 
ILCodeVersionCollection* m_pCollection; }; class MethodDescVersioningState { public: MethodDescVersioningState(PTR_MethodDesc pMethodDesc); PTR_MethodDesc GetMethodDesc() const; NativeCodeVersionId AllocateVersionId(); PTR_NativeCodeVersionNode GetFirstVersionNode() const; #ifndef DACCESS_COMPILE void LinkNativeCodeVersionNode(NativeCodeVersionNode* pNativeCodeVersionNode); #endif // DACCESS_COMPILE //read-write data for the default native code version BOOL IsDefaultVersionActiveChild() const; #ifndef DACCESS_COMPILE void SetDefaultVersionActiveChildFlag(BOOL isActive); #endif private: PTR_MethodDesc m_pMethodDesc; enum MethodDescVersioningStateFlags { IsDefaultVersionActiveChildFlag = 0x4 }; BYTE m_flags; NativeCodeVersionId m_nextId; PTR_NativeCodeVersionNode m_pFirstVersionNode; }; class MethodDescVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_MethodDescVersioningState>> { public: typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::element_t element_t; typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::count_t count_t; typedef const PTR_MethodDesc key_t; static key_t GetKey(element_t e) { LIMITED_METHOD_CONTRACT; return e->GetMethodDesc(); } static BOOL Equals(key_t k1, key_t k2) { LIMITED_METHOD_CONTRACT; return k1 == k2; } static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; return (count_t)dac_cast<TADDR>(k); } static element_t Null() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_MethodDescVersioningState>(nullptr); } static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; } }; typedef SHash<MethodDescVersioningStateHashTraits> MethodDescVersioningStateHash; class ILCodeVersioningState { public: ILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersion GetActiveVersion() const; PTR_ILCodeVersionNode GetFirstVersionNode() const; #ifndef DACCESS_COMPILE void SetActiveVersion(ILCodeVersion ilActiveCodeVersion); void LinkILCodeVersionNode(ILCodeVersionNode* pILCodeVersionNode); #endif struct Key { public: Key(); Key(PTR_Module pModule, mdMethodDef methodDef); size_t Hash() const; bool operator==(const Key & rhs) const; private: PTR_Module m_pModule; mdMethodDef m_methodDef; }; Key GetKey() const; private: ILCodeVersion m_activeVersion; PTR_ILCodeVersionNode m_pFirstVersionNode; PTR_Module m_pModule; mdMethodDef m_methodDef; }; class ILCodeVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_ILCodeVersioningState>> { public: typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::element_t element_t; typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::count_t count_t; typedef const ILCodeVersioningState::Key key_t; static key_t GetKey(element_t e) { LIMITED_METHOD_CONTRACT; return e->GetKey(); } static BOOL Equals(key_t k1, key_t k2) { LIMITED_METHOD_CONTRACT; return k1 == k2; } static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; return (count_t)k.Hash(); } static element_t Null() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_ILCodeVersioningState>(nullptr); } static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; } }; typedef SHash<ILCodeVersioningStateHashTraits> ILCodeVersioningStateHash; class CodeVersionManager { friend class ILCodeVersion; public: CodeVersionManager(); DWORD GetNonDefaultILVersionCount(); ILCodeVersionCollection GetILCodeVersions(PTR_MethodDesc pMethod); ILCodeVersionCollection GetILCodeVersions(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersion 
GetActiveILCodeVersion(PTR_MethodDesc pMethod); ILCodeVersion GetActiveILCodeVersion(PTR_Module pModule, mdMethodDef methodDef); ILCodeVersion GetILCodeVersion(PTR_MethodDesc pMethod, ReJITID rejitId); NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pMethod) const; NativeCodeVersion GetNativeCodeVersion(PTR_MethodDesc pMethod, PCODE codeStartAddress) const; PTR_ILCodeVersioningState GetILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) const; PTR_MethodDescVersioningState GetMethodDescVersioningState(PTR_MethodDesc pMethod) const; #ifndef DACCESS_COMPILE struct CodePublishError { Module* pModule; mdMethodDef methodDef; MethodDesc* pMethodDesc; HRESULT hrStatus; }; HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion); HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion, PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0); PCODE PublishVersionableCodeIfNecessary( MethodDesc* pMethodDesc, CallerGCMode callerGCMode, bool *doBackpatchRef, bool *doFullBackpatchRef); HRESULT PublishNativeCodeVersion(MethodDesc* pMethodDesc, NativeCodeVersion nativeCodeVersion); HRESULT GetOrCreateMethodDescVersioningState(MethodDesc* pMethod, MethodDescVersioningState** ppMethodDescVersioningState); HRESULT GetOrCreateILCodeVersioningState(Module* pModule, mdMethodDef methodDef, ILCodeVersioningState** ppILCodeVersioningState); HRESULT SetActiveILCodeVersions(ILCodeVersion* pActiveVersions, DWORD cActiveVersions, CDynArray<CodePublishError> * pPublishErrors); static HRESULT AddCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors); static HRESULT AddCodePublishError(NativeCodeVersion nativeCodeVersion, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors); static void OnAppDomainExit(AppDomain* pAppDomain); #endif static bool IsMethodSupported(PTR_MethodDesc pMethodDesc); #ifndef DACCESS_COMPILE static bool InitialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion() { LIMITED_METHOD_CONTRACT; return s_initialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion; } static void SetInitialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion() { LIMITED_METHOD_CONTRACT; s_initialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion = true; } #endif private: #ifndef DACCESS_COMPILE static HRESULT EnumerateClosedMethodDescs(MethodDesc* pMD, CDynArray<MethodDesc*> * pClosedMethodDescs, CDynArray<CodePublishError> * pUnsupportedMethodErrors); static HRESULT EnumerateDomainClosedMethodDescs( AppDomain * pAppDomainToSearch, Module* pModuleContainingMethodDef, mdMethodDef methodDef, CDynArray<MethodDesc*> * pClosedMethodDescs, CDynArray<CodePublishError> * pUnsupportedMethodErrors); static HRESULT GetNonVersionableError(MethodDesc* pMD); void ReportCodePublishError(CodePublishError* pErrorRecord); void ReportCodePublishError(MethodDesc* pMD, HRESULT hrStatus); void ReportCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus); static bool s_initialNativeCodeVersionMayNotBeTheDefaultNativeCodeVersion; #endif //Module,MethodDef -> ILCodeVersioningState ILCodeVersioningStateHash m_ilCodeVersioningStateMap; //closed MethodDesc -> MethodDescVersioningState MethodDescVersioningStateHash m_methodDescVersioningStateMap; private: static CrstStatic s_lock; #ifndef DACCESS_COMPILE public: static 
void StaticInitialize() { WRAPPER_NO_CONTRACT; s_lock.Init( CrstCodeVersioning, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN)); } #endif #ifdef _DEBUG public: static bool IsLockOwnedByCurrentThread(); #endif public: class LockHolder : private CrstHolderWithState { public: LockHolder() #ifndef DACCESS_COMPILE : CrstHolderWithState(&s_lock) #else : CrstHolderWithState(nullptr) #endif { WRAPPER_NO_CONTRACT; } LockHolder(const LockHolder &) = delete; LockHolder &operator =(const LockHolder &) = delete; }; }; #endif // FEATURE_CODE_VERSIONING //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NativeCodeVersion definitions inline NativeCodeVersion::NativeCodeVersion() #ifdef FEATURE_CODE_VERSIONING : m_storageKind(StorageKind::Unknown), m_pVersionNode(PTR_NULL) #else : m_pMethodDesc(PTR_NULL) #endif { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING static_assert_no_msg(sizeof(m_pVersionNode) == sizeof(m_synthetic)); #endif } inline NativeCodeVersion::NativeCodeVersion(const NativeCodeVersion & rhs) #ifdef FEATURE_CODE_VERSIONING : m_storageKind(rhs.m_storageKind), m_pVersionNode(rhs.m_pVersionNode) #else : m_pMethodDesc(rhs.m_pMethodDesc) #endif { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING static_assert_no_msg(sizeof(m_pVersionNode) == sizeof(m_synthetic)); #endif } inline BOOL NativeCodeVersion::IsNull() const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING return m_storageKind == StorageKind::Unknown; #else return m_pMethodDesc == NULL; #endif } inline PTR_MethodDesc NativeCodeVersion::GetMethodDesc() const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING return m_storageKind == StorageKind::Explicit ? m_pVersionNode->GetMethodDesc() : m_synthetic.m_pMethodDesc; #else return m_pMethodDesc; #endif } inline NativeCodeVersionId NativeCodeVersion::GetVersionId() const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING if (m_storageKind == StorageKind::Explicit) { return m_pVersionNode->GetVersionId(); } #endif return 0; } inline bool NativeCodeVersion::operator==(const NativeCodeVersion & rhs) const { LIMITED_METHOD_DAC_CONTRACT; #ifdef FEATURE_CODE_VERSIONING static_assert_no_msg(sizeof(m_pVersionNode) == sizeof(m_synthetic)); return m_storageKind == rhs.m_storageKind && m_pVersionNode == rhs.m_pVersionNode; #else return m_pMethodDesc == rhs.m_pMethodDesc; #endif } inline bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const { WRAPPER_NO_CONTRACT; return !operator==(rhs); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NativeCodeVersionNode definitions #ifdef FEATURE_CODE_VERSIONING inline PTR_MethodDesc NativeCodeVersionNode::GetMethodDesc() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMethodDesc; } #endif // FEATURE_CODE_VERSIONING #endif // CODE_VERSION_H
-1
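codeversion.h above represents NativeCodeVersion as a small discriminated union: m_storageKind selects between Explicit (a pointer to a heap-allocated NativeCodeVersionNode) and Synthetic (the default version packed inline as just a MethodDesc pointer), so the common default-version case costs no allocation. A compact sketch of that pattern with placeholder types, assumed shapes rather than the runtime's:

// Sketch of the StorageKind discriminated union (placeholder types).
#include <cstdio>

struct MethodDesc {};
struct VersionNode { MethodDesc* pMD; int versionId; };

class CodeVersion
{
    enum class StorageKind { Unknown, Explicit, Synthetic };
    StorageKind m_kind;
    union {
        VersionNode* m_pNode; // Explicit: heap-backed, carries a real version id
        MethodDesc*  m_pMD;   // Synthetic: default version, no allocation
    };

public:
    CodeVersion() : m_kind(StorageKind::Unknown), m_pNode(nullptr) {}
    explicit CodeVersion(MethodDesc* pMD)   : m_kind(StorageKind::Synthetic), m_pMD(pMD) {}
    explicit CodeVersion(VersionNode* node) : m_kind(StorageKind::Explicit),  m_pNode(node) {}

    int VersionId() const
    {
        // Mirrors NativeCodeVersion::GetVersionId above: synthetic versions report id 0.
        return m_kind == StorageKind::Explicit ? m_pNode->versionId : 0;
    }
};

int main()
{
    MethodDesc md;
    VersionNode node { &md, 7 };
    printf("default=%d explicit=%d\n", CodeVersion(&md).VersionId(), CodeVersion(&node).VersionId());
}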
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/mono/mono/metadata/sre-internals.h
/** * \file * Copyright 2016 Microsoft * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_SRE_INTERNALS_H__ #define __MONO_METADATA_SRE_INTERNALS_H__ #include <mono/metadata/object-internals.h> /* Keep in sync with System.Reflection.Emit.AssemblyBuilderAccess */ enum MonoAssemblyBuilderAccess { MonoAssemblyBuilderAccess_Run = 1, /* 0b0001 */ MonoAssemblyBuilderAccess_Save = 2, /* 0b0010 */ MonoAssemblyBuilderAccess_RunAndSave = 3, /* Run | Save */ MonoAssemblyBuilderAccess_ReflectionOnly = 6, /* Refonly | Save */ MonoAssemblyBuilderAccess_RunAndCollect = 9, /* Collect | Run */ }; typedef struct _ArrayMethod ArrayMethod; typedef struct { MonoReflectionILGen *ilgen; MonoReflectionType *rtype; MonoArray *parameters; MonoArray *generic_params; MonoGenericContainer *generic_container; MonoArray *pinfo; MonoArray *opt_types; guint32 attrs; guint32 iattrs; guint32 call_conv; guint32 *table_idx; /* note: it's a pointer */ MonoArray *code; MonoObject *type; MonoString *name; MonoBoolean init_locals; MonoBoolean skip_visibility; MonoArray *return_modreq; MonoArray *return_modopt; MonoArray *param_modreq; MonoArray *param_modopt; MonoMethod *mhandle; guint32 nrefs; gpointer *refs; /* for PInvoke */ int charset, extra_flags, native_cc; MonoString *dll, *dllentry; } ReflectionMethodBuilder; /* FIXME raw pointers to managed objects */ void mono_reflection_emit_init (void); void mono_reflection_dynimage_basic_init (MonoReflectionAssemblyBuilder *assemblyb, MonoError *error); gpointer mono_image_g_malloc0 (MonoImage *image, guint size); #define mono_image_g_malloc0(image, size) (g_cast (mono_image_g_malloc0 ((image), (size)))) gboolean mono_is_sre_type_builder (MonoClass *klass); gboolean mono_is_sre_generic_instance (MonoClass *klass); gboolean mono_is_sre_method_on_tb_inst (MonoClass *klass); gboolean mono_is_sre_ctor_builder (MonoClass *klass); gboolean mono_is_sre_ctor_on_tb_inst (MonoClass *klass); gboolean mono_is_sr_mono_cmethod (MonoClass *klass); gboolean mono_is_sr_mono_property (MonoClass *klass); MonoType* mono_reflection_type_get_handle (MonoReflectionType *ref, MonoError *error); gpointer mono_reflection_resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context, MonoError *error); gpointer mono_reflection_resolve_object_handle (MonoImage *image, MonoObjectHandle obj, MonoClass **handle_class, MonoGenericContext *context, MonoError *error); MonoType* mono_type_array_get_and_resolve (MonoArrayHandle array, int idx, MonoError* error); void mono_sre_array_method_free (ArrayMethod *am); gboolean mono_reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb, MonoError *error); gboolean mono_reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb, MonoError *error); guint32 mono_reflection_resolution_scope_from_image (MonoDynamicImage *assembly, MonoImage *image); guint32 mono_reflection_method_count_clauses (MonoReflectionILGen *ilgen); /* sre-encode */ guint32 mono_dynimage_encode_field_signature (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb, MonoError *error); guint32 mono_dynimage_encode_constant (MonoDynamicImage *assembly, MonoObject *val, MonoTypeEnum *ret_type); guint32 mono_dynimage_encode_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec); guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod 
*method, gboolean create_typespec); #endif /* __MONO_METADATA_SRE_INTERNALS_H__ */
/** * \file * Copyright 2016 Microsoft * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_SRE_INTERNALS_H__ #define __MONO_METADATA_SRE_INTERNALS_H__ #include <mono/metadata/object-internals.h> /* Keep in sync with System.Reflection.Emit.AssemblyBuilderAccess */ enum MonoAssemblyBuilderAccess { MonoAssemblyBuilderAccess_Run = 1, /* 0b0001 */ MonoAssemblyBuilderAccess_Save = 2, /* 0b0010 */ MonoAssemblyBuilderAccess_RunAndSave = 3, /* Run | Save */ MonoAssemblyBuilderAccess_ReflectionOnly = 6, /* Refonly | Save */ MonoAssemblyBuilderAccess_RunAndCollect = 9, /* Collect | Run */ }; typedef struct _ArrayMethod ArrayMethod; typedef struct { MonoReflectionILGen *ilgen; MonoReflectionType *rtype; MonoArray *parameters; MonoArray *generic_params; MonoGenericContainer *generic_container; MonoArray *pinfo; MonoArray *opt_types; guint32 attrs; guint32 iattrs; guint32 call_conv; guint32 *table_idx; /* note: it's a pointer */ MonoArray *code; MonoObject *type; MonoString *name; MonoBoolean init_locals; MonoBoolean skip_visibility; MonoArray *return_modreq; MonoArray *return_modopt; MonoArray *param_modreq; MonoArray *param_modopt; MonoMethod *mhandle; guint32 nrefs; gpointer *refs; /* for PInvoke */ int charset, extra_flags, native_cc; MonoString *dll, *dllentry; } ReflectionMethodBuilder; /* FIXME raw pointers to managed objects */ void mono_reflection_emit_init (void); void mono_reflection_dynimage_basic_init (MonoReflectionAssemblyBuilder *assemblyb, MonoError *error); gpointer mono_image_g_malloc0 (MonoImage *image, guint size); #define mono_image_g_malloc0(image, size) (g_cast (mono_image_g_malloc0 ((image), (size)))) gboolean mono_is_sre_type_builder (MonoClass *klass); gboolean mono_is_sre_generic_instance (MonoClass *klass); gboolean mono_is_sre_method_on_tb_inst (MonoClass *klass); gboolean mono_is_sre_ctor_builder (MonoClass *klass); gboolean mono_is_sre_ctor_on_tb_inst (MonoClass *klass); gboolean mono_is_sr_mono_cmethod (MonoClass *klass); gboolean mono_is_sr_mono_property (MonoClass *klass); MonoType* mono_reflection_type_get_handle (MonoReflectionType *ref, MonoError *error); gpointer mono_reflection_resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context, MonoError *error); gpointer mono_reflection_resolve_object_handle (MonoImage *image, MonoObjectHandle obj, MonoClass **handle_class, MonoGenericContext *context, MonoError *error); MonoType* mono_type_array_get_and_resolve (MonoArrayHandle array, int idx, MonoError* error); void mono_sre_array_method_free (ArrayMethod *am); gboolean mono_reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb, MonoError *error); gboolean mono_reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb, MonoError *error); guint32 mono_reflection_resolution_scope_from_image (MonoDynamicImage *assembly, MonoImage *image); guint32 mono_reflection_method_count_clauses (MonoReflectionILGen *ilgen); /* sre-encode */ guint32 mono_dynimage_encode_field_signature (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb, MonoError *error); guint32 mono_dynimage_encode_constant (MonoDynamicImage *assembly, MonoObject *val, MonoTypeEnum *ret_type); guint32 mono_dynimage_encode_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec); guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod 
*method, gboolean create_typespec); #endif /* __MONO_METADATA_SRE_INTERNALS_H__ */
-1
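The mono_image_g_malloc0 declaration above is immediately wrapped by a same-named macro that routes every call through g_cast, so C++ callers get an implicit pointer conversion that plain void* would not allow. A small self-contained C++ sketch of that wrap-a-function-in-a-same-named-macro idiom follows; any_cast, image_malloc0, and demo are hypothetical stand-ins, not Mono APIs: #include <cstdlib> /* helper object that converts to any pointer type, standing in for g_cast */ struct any_cast { void* p; template <typename T> operator T*() const { return static_cast<T*>(p); } }; void* image_malloc0(unsigned size) { return std::calloc(1, size); } /* from here on every call goes through the macro; the inner image_malloc0 is not re-expanded by the preprocessor, so it refers to the real function */ #define image_malloc0(size) (any_cast{ image_malloc0((size)) }) int* demo() { int* values = image_malloc0(16 * sizeof(int)); return values; }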
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/native/libs/System.Security.Cryptography.Native/pal_ecdsa.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_types.h" #include "pal_compiler.h" #include "opensslshim.h" /* Shims the ECDSA_sign method. Returns 1 on success, otherwise 0. */ PALEXPORT int32_t CryptoNative_EcDsaSign(const uint8_t* dgst, int32_t dgstlen, uint8_t* sig, int32_t* siglen, EC_KEY* key); /* Shims the ECDSA_verify method. Returns 1 for a correct signature, 0 for an incorrect signature, -1 on error. */ PALEXPORT int32_t CryptoNative_EcDsaVerify(const uint8_t* dgst, int32_t dgstlen, const uint8_t* sig, int32_t siglen, EC_KEY* key); /* Shims the ECDSA_size method. Returns the maximum length of a DER encoded ECDSA signature created with this key. */ PALEXPORT int32_t CryptoNative_EcDsaSize(const EC_KEY* key);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_types.h" #include "pal_compiler.h" #include "opensslshim.h" /* Shims the ECDSA_sign method. Returns 1 on success, otherwise 0. */ PALEXPORT int32_t CryptoNative_EcDsaSign(const uint8_t* dgst, int32_t dgstlen, uint8_t* sig, int32_t* siglen, EC_KEY* key); /* Shims the ECDSA_verify method. Returns 1 for a correct signature, 0 for an incorrect signature, -1 on error. */ PALEXPORT int32_t CryptoNative_EcDsaVerify(const uint8_t* dgst, int32_t dgstlen, const uint8_t* sig, int32_t siglen, EC_KEY* key); /* Shims the ECDSA_size method. Returns the maximum length of a DER encoded ECDSA signature created with this key. */ PALEXPORT int32_t CryptoNative_EcDsaSize(const EC_KEY* key);
-1
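A caller's-eye sketch of the sign/verify round trip these shims document; it assumes pal_ecdsa.h (shown above) is on the include path, that the shim library is linked, and that key is a valid EC_KEY*. The function SignAndVerify is an illustration of the stated return-value contract, not repository code: #include "pal_ecdsa.h" #include <vector> bool SignAndVerify(EC_KEY* key, const uint8_t* digest, int32_t digestLen) { int32_t sigLen = CryptoNative_EcDsaSize(key); /* max DER-encoded signature length */ std::vector<uint8_t> sig(sigLen); if (CryptoNative_EcDsaSign(digest, digestLen, sig.data(), &sigLen, key) != 1) return false; /* 1 on success, 0 otherwise */ /* verify returns 1 = correct, 0 = incorrect, -1 = error */ return CryptoNative_EcDsaVerify(digest, digestLen, sig.data(), sigLen, key) == 1; }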
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/tests/Interop/StringMarshalling/LPTSTR/LPTStrTestNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "../Native/StringMarshalingNative.h" using StringType = LPWSTR; using Tests = StringMarshalingTests<StringType, TP_slen>; #define FUNCTION_NAME __FUNCTIONW__ #include "../Native/StringTestEntrypoints.inl" // Verify that we append extra null terminators to our StringBuilder native buffers. // Although this is a hidden implementation detail, it would be breaking behavior to stop doing this // so we have a test for it. In particular, this detail prevents us from optimizing marshalling StringBuilders by pinning. extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE Verify_NullTerminators_PastEnd(LPCWSTR buffer, int length) { return buffer[length+1] == W('\0'); } struct ByValStringInStructAnsi { char str[20]; }; struct ByValStringInStructUnicode { WCHAR str[20]; }; extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameAnsi(ByValStringInStructAnsi str) { return StringMarshalingTests<char*, default_callconv_strlen>::Compare(__func__, str.str); } extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameUni(ByValStringInStructUnicode str) { return StringMarshalingTests<LPWSTR, TP_slen>::Compare(__FUNCTIONW__, str.str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringAnsi(ByValStringInStructAnsi* str) { StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(str->str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringUni(ByValStringInStructUnicode* str) { StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(str->str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringAnsi(ByValStringInStructAnsi str, ByValStringInStructAnsi* out) { *out = str; StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(out->str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringUni(ByValStringInStructUnicode str, ByValStringInStructUnicode* out) { *out = str; StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(out->str); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "../Native/StringMarshalingNative.h" using StringType = LPWSTR; using Tests = StringMarshalingTests<StringType, TP_slen>; #define FUNCTION_NAME __FUNCTIONW__ #include "../Native/StringTestEntrypoints.inl" // Verify that we append extra null terminators to our StringBuilder native buffers. // Although this is a hidden implementation detail, it would be breaking behavior to stop doing this // so we have a test for it. In particular, this detail prevents us from optimizing marshalling StringBuilders by pinning. extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE Verify_NullTerminators_PastEnd(LPCWSTR buffer, int length) { return buffer[length+1] == W('\0'); } struct ByValStringInStructAnsi { char str[20]; }; struct ByValStringInStructUnicode { WCHAR str[20]; }; extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameAnsi(ByValStringInStructAnsi str) { return StringMarshalingTests<char*, default_callconv_strlen>::Compare(__func__, str.str); } extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameUni(ByValStringInStructUnicode str) { return StringMarshalingTests<LPWSTR, TP_slen>::Compare(__FUNCTIONW__, str.str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringAnsi(ByValStringInStructAnsi* str) { StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(str->str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringUni(ByValStringInStructUnicode* str) { StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(str->str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringAnsi(ByValStringInStructAnsi str, ByValStringInStructAnsi* out) { *out = str; StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(out->str); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringUni(ByValStringInStructUnicode str, ByValStringInStructUnicode* out) { *out = str; StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(out->str); }
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/debug/inc/processdescriptor.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** #ifndef _PROCESSCONTEXT_H #define _PROCESSCONTEXT_H struct ProcessDescriptor { const static DWORD UNINITIALIZED_PID = 0; static ProcessDescriptor Create(DWORD pid, LPCSTR applicationGroupId) { ProcessDescriptor pd; pd.m_Pid = pid; pd.m_ApplicationGroupId = applicationGroupId; return pd; } static ProcessDescriptor FromCurrentProcess(); static ProcessDescriptor FromPid(DWORD pid) { return Create(pid, nullptr); } static ProcessDescriptor CreateUninitialized() { return Create(UNINITIALIZED_PID, nullptr); } bool IsInitialized() const { return m_Pid != UNINITIALIZED_PID; } DWORD m_Pid; LPCSTR m_ApplicationGroupId; }; #endif // _PROCESSCONTEXT_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** #ifndef _PROCESSCONTEXT_H #define _PROCESSCONTEXT_H struct ProcessDescriptor { const static DWORD UNINITIALIZED_PID = 0; static ProcessDescriptor Create(DWORD pid, LPCSTR applicationGroupId) { ProcessDescriptor pd; pd.m_Pid = pid; pd.m_ApplicationGroupId = applicationGroupId; return pd; } static ProcessDescriptor FromCurrentProcess(); static ProcessDescriptor FromPid(DWORD pid) { return Create(pid, nullptr); } static ProcessDescriptor CreateUninitialized() { return Create(UNINITIALIZED_PID, nullptr); } bool IsInitialized() const { return m_Pid != UNINITIALIZED_PID; } DWORD m_Pid; LPCSTR m_ApplicationGroupId; }; #endif // _PROCESSCONTEXT_H
-1
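Since ProcessDescriptor is a plain header-only struct, a short illustrative snippet of the intended call pattern (the pid value 1234 is made up): /* describe an arbitrary process by pid; no application group id */ ProcessDescriptor pd = ProcessDescriptor::FromPid(1234); if (!pd.IsInitialized()) /* only true for UNINITIALIZED_PID (0) */ { pd = ProcessDescriptor::FromCurrentProcess(); /* fall back to self */ }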
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/mono/mono/metadata/runtime.h
/** * \file * Runtime functions * * Author: * Jonathan Pryor * * (C) 2010 Novell, Inc. */ #ifndef _MONO_METADATA_RUNTIME_H_ #define _MONO_METADATA_RUNTIME_H_ #include <glib.h> #include <mono/metadata/metadata.h> #include <mono/utils/mono-publib.h> #include <mono/utils/mono-compiler.h> MONO_COMPONENT_API gboolean mono_runtime_try_shutdown (void); void mono_runtime_init_tls (void); MONO_PROFILER_API char* mono_runtime_get_aotid (void); MONO_COMPONENT_API MonoAssembly* mono_runtime_get_entry_assembly (void); void mono_runtime_ensure_entry_assembly (MonoAssembly *assembly); #endif /* _MONO_METADATA_RUNTIME_H_ */
/** * \file * Runtime functions * * Author: * Jonathan Pryor * * (C) 2010 Novell, Inc. */ #ifndef _MONO_METADATA_RUNTIME_H_ #define _MONO_METADATA_RUNTIME_H_ #include <glib.h> #include <mono/metadata/metadata.h> #include <mono/utils/mono-publib.h> #include <mono/utils/mono-compiler.h> MONO_COMPONENT_API gboolean mono_runtime_try_shutdown (void); void mono_runtime_init_tls (void); MONO_PROFILER_API char* mono_runtime_get_aotid (void); MONO_COMPONENT_API MonoAssembly* mono_runtime_get_entry_assembly (void); void mono_runtime_ensure_entry_assembly (MonoAssembly *assembly); #endif /* _MONO_METADATA_RUNTIME_H_ */
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/amd64/gmsamd64.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /**************************************************************/ /* gmsAMD64.cpp */ /**************************************************************/ #include "common.h" #include "gmscpu.h" void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundState, DWORD threadId, int funCallDepth /* = 1 */, HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; CONTEXT ctx; KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs; ctx.ContextFlags = 0; // Read by PAL_VirtualUnwind. ctx.Rip = baseState->m_CaptureRip; ctx.Rsp = baseState->m_CaptureRsp + 8; // +8 for return addr pushed before calling LazyMachStateCaptureState #define CALLEE_SAVED_REGISTER(regname) ctx.regname = unwoundState->m_Capture.regname = baseState->m_Capture.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #if !defined(DACCESS_COMPILE) // For DAC, if we get here, it means that the LazyMachState is uninitialized and we have to unwind it. // The API we use to unwind in DAC is StackWalk64(), which does not support the context pointers. #define CALLEE_SAVED_REGISTER(regname) nonVolRegPtrs.regname = (PDWORD64)&unwoundState->m_Capture.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif // !DACCESS_COMPILE LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->m_CaptureRip, baseState->m_CaptureRsp)); PCODE pvControlPc; do { #ifndef TARGET_UNIX pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs); #else // !TARGET_UNIX #if defined(DACCESS_COMPILE) HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs); if (FAILED(hr)) { DacError(hr); } #else BOOL success = PAL_VirtualUnwind(&ctx, &nonVolRegPtrs); if (!success) { _ASSERTE(!"unwindLazyState: Unwinding failed"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } #endif // DACCESS_COMPILE pvControlPc = GetIP(&ctx); #endif // !TARGET_UNIX if (funCallDepth > 0) { --funCallDepth; if (funCallDepth == 0) break; } else { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. BOOL fFailedReaderLock = FALSE; BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); if (fFailedReaderLock) { // We don't know if we would have been able to find a JIT // manager, because we couldn't enter the reader lock without // yielding (and our caller doesn't want us to yield). So abort // now. // Invalidate the lazyState we're returning, so the caller knows // we aborted before we could fully unwind unwoundState->_pRetAddr = NULL; return; } if (fIsManagedCode) break; } } while(TRUE); // // Update unwoundState so that HelperMethodFrameRestoreState knows which // registers have been potentially modified. // unwoundState->m_Rip = ctx.Rip; unwoundState->m_Rsp = ctx.Rsp; // For DAC, the return value of this function may be used after unwoundState goes out of scope. so we cannot do // "unwoundState->_pRetAddr = PTR_TADDR(&unwoundState->m_Rip)". 
unwoundState->_pRetAddr = PTR_TADDR(unwoundState->m_Rsp - 8); #ifdef TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Unwound.regname = ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif #if defined(DACCESS_COMPILE) // For DAC, we have to update the registers directly, since we don't have context pointers. #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Capture.regname = ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER // Since we don't have context pointers in this case, just assign them to NULL. #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Ptrs.p##regname = NULL; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #else // !DACCESS_COMPILE #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Ptrs.p##regname = PTR_ULONG64(nonVolRegPtrs.regname); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif // DACCESS_COMPILE }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /**************************************************************/ /* gmsAMD64.cpp */ /**************************************************************/ #include "common.h" #include "gmscpu.h" void LazyMachState::unwindLazyState(LazyMachState* baseState, MachState* unwoundState, DWORD threadId, int funCallDepth /* = 1 */, HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */) { CONTRACTL { NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; CONTEXT ctx; KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs; ctx.ContextFlags = 0; // Read by PAL_VirtualUnwind. ctx.Rip = baseState->m_CaptureRip; ctx.Rsp = baseState->m_CaptureRsp + 8; // +8 for return addr pushed before calling LazyMachStateCaptureState #define CALLEE_SAVED_REGISTER(regname) ctx.regname = unwoundState->m_Capture.regname = baseState->m_Capture.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #if !defined(DACCESS_COMPILE) // For DAC, if we get here, it means that the LazyMachState is uninitialized and we have to unwind it. // The API we use to unwind in DAC is StackWalk64(), which does not support the context pointers. #define CALLEE_SAVED_REGISTER(regname) nonVolRegPtrs.regname = (PDWORD64)&unwoundState->m_Capture.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif // !DACCESS_COMPILE LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->m_CaptureRip, baseState->m_CaptureRsp)); PCODE pvControlPc; do { #ifndef TARGET_UNIX pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs); #else // !TARGET_UNIX #if defined(DACCESS_COMPILE) HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs); if (FAILED(hr)) { DacError(hr); } #else BOOL success = PAL_VirtualUnwind(&ctx, &nonVolRegPtrs); if (!success) { _ASSERTE(!"unwindLazyState: Unwinding failed"); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } #endif // DACCESS_COMPILE pvControlPc = GetIP(&ctx); #endif // !TARGET_UNIX if (funCallDepth > 0) { --funCallDepth; if (funCallDepth == 0) break; } else { // Determine whether given IP resides in JITted code. (It returns nonzero in that case.) // Use it now to see if we've unwound to managed code yet. BOOL fFailedReaderLock = FALSE; BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock); if (fFailedReaderLock) { // We don't know if we would have been able to find a JIT // manager, because we couldn't enter the reader lock without // yielding (and our caller doesn't want us to yield). So abort // now. // Invalidate the lazyState we're returning, so the caller knows // we aborted before we could fully unwind unwoundState->_pRetAddr = NULL; return; } if (fIsManagedCode) break; } } while(TRUE); // // Update unwoundState so that HelperMethodFrameRestoreState knows which // registers have been potentially modified. // unwoundState->m_Rip = ctx.Rip; unwoundState->m_Rsp = ctx.Rsp; // For DAC, the return value of this function may be used after unwoundState goes out of scope. so we cannot do // "unwoundState->_pRetAddr = PTR_TADDR(&unwoundState->m_Rip)". 
unwoundState->_pRetAddr = PTR_TADDR(unwoundState->m_Rsp - 8); #ifdef TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Unwound.regname = ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif #if defined(DACCESS_COMPILE) // For DAC, we have to update the registers directly, since we don't have context pointers. #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Capture.regname = ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER // Since we don't have context pointers in this case, just assign them to NULL. #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Ptrs.p##regname = NULL; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #else // !DACCESS_COMPILE #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Ptrs.p##regname = PTR_ULONG64(nonVolRegPtrs.regname); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #endif // DACCESS_COMPILE }
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/tests/palsuite/c_runtime/strtod/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Tests strtod with overflows ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_strtod_test2_paltest_strtod_test2, "c_runtime/strtod/test2/paltest_strtod_test2") { /* Representation of positive infinity for an IEEE 64-bit double */ INT64 PosInfinity = (INT64)(0x7ff00000) << 32; double HugeVal = *(double*) &PosInfinity; char *PosStr = "1E+10000"; char *NegStr = "-1E+10000"; double result; if (PAL_Initialize(argc,argv)) { return FAIL; } result = strtod(PosStr, NULL); if (result != HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", PosStr, result, HugeVal); } result = strtod(NegStr, NULL); if (result != -HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", NegStr, result, -HugeVal); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Tests strtod with overflows ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_strtod_test2_paltest_strtod_test2, "c_runtime/strtod/test2/paltest_strtod_test2") { /* Representation of positive infinity for an IEEE 64-bit double */ INT64 PosInfinity = (INT64)(0x7ff00000) << 32; double HugeVal = *(double*) &PosInfinity; char *PosStr = "1E+10000"; char *NegStr = "-1E+10000"; double result; if (PAL_Initialize(argc,argv)) { return FAIL; } result = strtod(PosStr, NULL); if (result != HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", PosStr, result, HugeVal); } result = strtod(NegStr, NULL); if (result != -HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", NegStr, result, -HugeVal); } PAL_Terminate(); return PASS; }
-1
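The (INT64)(0x7ff00000) << 32 trick in this test builds the IEEE 754 bit pattern of +infinity by hand. A standalone sketch of why that works, in standard C++ and independent of the PAL test harness: #include <cassert> #include <cmath> #include <cstdint> #include <cstring> int main() { /* IEEE 754 double: 1 sign bit | 11 exponent bits | 52 fraction bits. +infinity is exponent all ones (0x7FF) with a zero fraction, i.e. 0x7FF0000000000000 -- exactly (int64_t)0x7ff00000 << 32. */ uint64_t bits = (uint64_t)0x7ff00000 << 32; double inf; std::memcpy(&inf, &bits, sizeof inf); /* well-defined type pun */ assert(std::isinf(inf) && inf > 0); return 0; }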
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/inc/random.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // random.h // // // Defines a random number generator, initially from the System.Random code in the BCL. // // Main advantages over rand() are: // // 1) It generates better random numbers // 2) It can have multiple instantiations with different seeds // 3) It behaves the same regardless of whether we build with VC++ or GCC // // If you are working in the VM, we have a convenience method: code:GetRandomInt. This uses a thread-local // Random instance if a Thread object is available, and otherwise falls back to a global instance // with a spin-lock. // #ifndef _CLRRANDOM_H_ #define _CLRRANDOM_H_ #include <math.h> // // Forbid the use of srand()/rand(), as these are globally shared facilities and our use of them would // interfere with native user code in the same process. This override is not compatible with stl headers. // #if !defined(DO_NOT_DISABLE_RAND) && !defined(USE_STL) #ifdef srand #undef srand #endif #define srand Do_not_use_srand #ifdef rand #undef rand #endif #define rand Do_not_use_rand #endif //!DO_NOT_DISABLE_RAND && !USE_STL class CLRRandom { private: // // Private Constants // static const int MBIG = INT_MAX; static const int MSEED = 161803398; static const int MZ = 0; // // Member Variables // int inext; int inextp; int SeedArray[56]; bool initialized; public: // // Constructors // CLRRandom() { LIMITED_METHOD_CONTRACT; initialized = false; } void Init() { LIMITED_METHOD_CONTRACT; LARGE_INTEGER time; if (!QueryPerformanceCounter(&time)) time.QuadPart = GetTickCount(); Init((int)time.u.LowPart ^ GetCurrentThreadId() ^ GetCurrentProcessId()); } void Init(int Seed) { LIMITED_METHOD_CONTRACT; int ii; int mj, mk; //Initialize our Seed array. mj = MSEED - abs(Seed); SeedArray[55]=mj; mk=1; for (int i=1; i<55; i++) { //Apparently the range [1..55] is special (Knuth) and so we're wasting the 0'th position. ii = (21*i)%55; SeedArray[ii]=mk; mk = mj - mk; if (mk<0) mk+=MBIG; mj=SeedArray[ii]; } for (int k=1; k<5; k++) { for (int i=1; i<56; i++) { SeedArray[i] -= SeedArray[1+(i+30)%55]; if (SeedArray[i]<0) SeedArray[i]+=MBIG; } } inext=0; inextp = 21; Seed = 1; initialized = true; } bool IsInitialized() { LIMITED_METHOD_CONTRACT; return initialized; } private: // // Package Private Methods // /*====================================Sample==================================== **Action: Return a new random number [0..1) and reSeed the Seed array. **Returns: A double [0..1) **Arguments: None **Exceptions: None ==============================================================================*/ double Sample() { LIMITED_METHOD_CONTRACT; //Including this division at the end gives us significantly improved //random number distribution. return (InternalSample()*(1.0/MBIG)); } int InternalSample() { LIMITED_METHOD_CONTRACT; int retVal; int locINext = inext; int locINextp = inextp; if (++locINext >=56) locINext=1; if (++locINextp>= 56) locINextp = 1; retVal = SeedArray[locINext]-SeedArray[locINextp]; if (retVal == MBIG) retVal--; if (retVal<0) retVal+=MBIG; SeedArray[locINext]=retVal; inext = locINext; inextp = locINextp; return retVal; } double GetSampleForLargeRange() { LIMITED_METHOD_CONTRACT; // The double values returned by Sample // are not distributed well enough for a large range. // If we use Sample for a range [Int32.MinValue..Int32.MaxValue) // We will end up getting even numbers only.
int result = InternalSample(); // Note we can't use addition here. The distribution will be bad if we do that. bool negative = (InternalSample()%2 == 0) ? true : false; // decide the sign based on second sample if( negative) { result = -result; } double d = result; d += (INT_MAX - 1); // get a number in range [0 .. 2 * Int32MaxValue - 1) d /= 2*(unsigned int)INT_MAX - 1 ; return d; } public: // // Public Instance Methods // /*=====================================Next===================================== **Returns: An int [0..Int32.MaxValue) **Arguments: None **Exceptions: None. ==============================================================================*/ int Next() { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); return InternalSample(); } /*=====================================Next===================================== **Returns: An int [minvalue..maxvalue) **Arguments: minValue -- the least legal value for the Random number. ** maxValue -- One greater than the greatest legal return value. **Exceptions: None. ==============================================================================*/ int Next(int minValue, int maxValue) { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); _ASSERTE(minValue < maxValue); LONGLONG range = (LONGLONG)maxValue-minValue; double result; if( range <= (LONGLONG)INT_MAX) result = (Sample() * range) + minValue; else result = (GetSampleForLargeRange() * range) + minValue; _ASSERTE(result >= minValue && result < maxValue); return (int)result; } /*=====================================Next===================================== **Returns: An int [0..maxValue) **Arguments: maxValue -- One more than the greatest legal return value. **Exceptions: None. ==============================================================================*/ int Next(int maxValue) { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); double result = Sample()*maxValue; _ASSERTE(result >= 0 && result < maxValue); return (int)result; } /*=====================================Next===================================== **Returns: A double [0..1) **Arguments: None **Exceptions: None ==============================================================================*/ double NextDouble() { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); double result = Sample(); _ASSERTE(result >= 0 && result < 1); return result; } /*==================================NextBytes=================================== **Action: Fills the byte array with random bytes [0..0x7f]. The entire array is filled. **Returns:Void **Arguments: buffer -- the array to be filled. **Exceptions: None ==============================================================================*/ void NextBytes(_Out_writes_(length) BYTE buffer[], int length) { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); for (int i=0; i<length; i++) { buffer[i]=(BYTE)(InternalSample()%(256)); } } }; #endif //_CLRRANDOM_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // random.h // // // Defines a random number generator, initially from the System.Random code in the BCL. // // Main advantages over rand() are: // // 1) It generates better random numbers // 2) It can have multiple instantiations with different seeds // 3) It behaves the same regardless of whether we build with VC++ or GCC // // If you are working in the VM, we have a convenience method: code:GetRandomInt. This uses a thread-local // Random instance if a Thread object is available, and otherwise falls back to a global instance // with a spin-lock. // #ifndef _CLRRANDOM_H_ #define _CLRRANDOM_H_ #include <math.h> // // Forbid the use of srand()/rand(), as these are globally shared facilities and our use of them would // interfere with native user code in the same process. This override is not compatible with stl headers. // #if !defined(DO_NOT_DISABLE_RAND) && !defined(USE_STL) #ifdef srand #undef srand #endif #define srand Do_not_use_srand #ifdef rand #undef rand #endif #define rand Do_not_use_rand #endif //!DO_NOT_DISABLE_RAND && !USE_STL class CLRRandom { private: // // Private Constants // static const int MBIG = INT_MAX; static const int MSEED = 161803398; static const int MZ = 0; // // Member Variables // int inext; int inextp; int SeedArray[56]; bool initialized; public: // // Constructors // CLRRandom() { LIMITED_METHOD_CONTRACT; initialized = false; } void Init() { LIMITED_METHOD_CONTRACT; LARGE_INTEGER time; if (!QueryPerformanceCounter(&time)) time.QuadPart = GetTickCount(); Init((int)time.u.LowPart ^ GetCurrentThreadId() ^ GetCurrentProcessId()); } void Init(int Seed) { LIMITED_METHOD_CONTRACT; int ii; int mj, mk; //Initialize our Seed array. mj = MSEED - abs(Seed); SeedArray[55]=mj; mk=1; for (int i=1; i<55; i++) { //Apparently the range [1..55] is special (Knuth) and so we're wasting the 0'th position. ii = (21*i)%55; SeedArray[ii]=mk; mk = mj - mk; if (mk<0) mk+=MBIG; mj=SeedArray[ii]; } for (int k=1; k<5; k++) { for (int i=1; i<56; i++) { SeedArray[i] -= SeedArray[1+(i+30)%55]; if (SeedArray[i]<0) SeedArray[i]+=MBIG; } } inext=0; inextp = 21; Seed = 1; initialized = true; } bool IsInitialized() { LIMITED_METHOD_CONTRACT; return initialized; } private: // // Package Private Methods // /*====================================Sample==================================== **Action: Return a new random number [0..1) and reSeed the Seed array. **Returns: A double [0..1) **Arguments: None **Exceptions: None ==============================================================================*/ double Sample() { LIMITED_METHOD_CONTRACT; //Including this division at the end gives us significantly improved //random number distribution. return (InternalSample()*(1.0/MBIG)); } int InternalSample() { LIMITED_METHOD_CONTRACT; int retVal; int locINext = inext; int locINextp = inextp; if (++locINext >=56) locINext=1; if (++locINextp>= 56) locINextp = 1; retVal = SeedArray[locINext]-SeedArray[locINextp]; if (retVal == MBIG) retVal--; if (retVal<0) retVal+=MBIG; SeedArray[locINext]=retVal; inext = locINext; inextp = locINextp; return retVal; } double GetSampleForLargeRange() { LIMITED_METHOD_CONTRACT; // The double values returned by Sample // are not distributed well enough for a large range. // If we use Sample for a range [Int32.MinValue..Int32.MaxValue) // We will end up getting even numbers only.
int result = InternalSample(); // Note we can't use addition here. The distribution will be bad if we do that. bool negative = (InternalSample()%2 == 0) ? true : false; // decide the sign based on second sample if( negative) { result = -result; } double d = result; d += (INT_MAX - 1); // get a number in range [0 .. 2 * Int32MaxValue - 1) d /= 2*(unsigned int)INT_MAX - 1 ; return d; } public: // // Public Instance Methods // /*=====================================Next===================================== **Returns: An int [0..Int32.MaxValue) **Arguments: None **Exceptions: None. ==============================================================================*/ int Next() { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); return InternalSample(); } /*=====================================Next===================================== **Returns: An int [minvalue..maxvalue) **Arguments: minValue -- the least legal value for the Random number. ** maxValue -- One greater than the greatest legal return value. **Exceptions: None. ==============================================================================*/ int Next(int minValue, int maxValue) { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); _ASSERTE(minValue < maxValue); LONGLONG range = (LONGLONG)maxValue-minValue; double result; if( range <= (LONGLONG)INT_MAX) result = (Sample() * range) + minValue; else result = (GetSampleForLargeRange() * range) + minValue; _ASSERTE(result >= minValue && result < maxValue); return (int)result; } /*=====================================Next===================================== **Returns: An int [0..maxValue) **Arguments: maxValue -- One more than the greatest legal return value. **Exceptions: None. ==============================================================================*/ int Next(int maxValue) { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); double result = Sample()*maxValue; _ASSERTE(result >= 0 && result < maxValue); return (int)result; } /*=====================================Next===================================== **Returns: A double [0..1) **Arguments: None **Exceptions: None ==============================================================================*/ double NextDouble() { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); double result = Sample(); _ASSERTE(result >= 0 && result < 1); return result; } /*==================================NextBytes=================================== **Action: Fills the byte array with random bytes [0..0x7f]. The entire array is filled. **Returns:Void **Arguments: buffer -- the array to be filled. **Exceptions: None ==============================================================================*/ void NextBytes(_Out_writes_(length) BYTE buffer[], int length) { LIMITED_METHOD_CONTRACT; _ASSERTE(initialized); for (int i=0; i<length; i++) { buffer[i]=(BYTE)(InternalSample()%(256)); } } }; #endif //_CLRRANDOM_H_
-1
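Given the header above, the expected call pattern looks roughly like this (a sketch only; VM code would normally go through the GetRandomInt convenience the file comment mentions, and BYTE comes from the Windows-style headers the runtime already includes): CLRRandom rng; rng.Init(); /* seed from QPC ^ thread id ^ process id */ /* or rng.Init(12345) for a deterministic, reproducible sequence */ int anyInt = rng.Next(); /* [0..Int32.MaxValue) */ int die = rng.Next(1, 7); /* [1..6] */ double unit = rng.NextDouble(); /* [0..1) */ BYTE buf[16]; rng.NextBytes(buf, 16); /* fills all 16 bytes */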
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/i386/cgenx86.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // CGENX86.CPP - // // Various helper routines for generating x86 assembly code. // // // Precompiled Header #include "common.h" #include "field.h" #include "stublink.h" #include "cgensys.h" #include "frames.h" #include "excep.h" #include "dllimport.h" #include "comdelegate.h" #include "log.h" #include "comdelegate.h" #include "array.h" #include "jitinterface.h" #include "codeman.h" #include "dbginterface.h" #include "eeprofinterfaces.h" #include "eeconfig.h" #include "asmconstants.h" #include "class.h" #include "virtualcallstub.h" #include "jitinterface.h" #ifdef FEATURE_COMINTEROP #include "comtoclrcall.h" #include "runtimecallablewrapper.h" #include "comcache.h" #include "olevariant.h" #endif // FEATURE_COMINTEROP #include "stublink.inl" extern "C" DWORD STDCALL GetSpecificCpuTypeAsm(void); extern "C" uint32_t STDCALL GetSpecificCpuFeaturesAsm(uint32_t *pInfo); // NOTE on Frame Size C_ASSERT usage in this file // if the frame size changes then the stubs have to be revisited for correctness // kindly revisit the logic and then update the constants so that the C_ASSERT will again fire // if someone changes the frame size. You are expected to keep this hard coded constant // up to date so that changes in the frame size trigger errors at compile time if the code is not altered void generate_noref_copy (unsigned nbytes, StubLinkerCPU* sl); #ifdef FEATURE_EH_FUNCLETS void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * regs) { LIMITED_METHOD_CONTRACT; T_CONTEXT * pContext = pRD->pCurrentContext; #define CALLEE_SAVED_REGISTER(regname) pContext->regname = regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers; #define CALLEE_SAVED_REGISTER(regname) pContextPointers->regname = (DWORD*)&regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER } void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) { LIMITED_METHOD_CONTRACT; #define ARGUMENT_AND_SCRATCH_REGISTER(regname) pRD->pCurrentContextPointers->regname = NULL; ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER } #endif // FEATURE_EH_FUNCLETS #ifndef DACCESS_COMPILE //--------------------------------------------------------------- // Returns the type of CPU (the value of x of x86) // (Please note that it returns 6 for P5-II) //--------------------------------------------------------------- void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo) { LIMITED_METHOD_CONTRACT; static CORINFO_CPU val = { 0, 0, 0 }; if (val.dwCPUType) { *cpuInfo = val; return; } CORINFO_CPU tempVal; tempVal.dwCPUType = GetSpecificCpuTypeAsm(); // written in ASM & doesn't participate in contracts _ASSERTE(tempVal.dwCPUType); #ifdef _DEBUG /* Set Family+Model+Stepping string (eg., x690 for Banias, or xF30 for P4 Prescott) * instead of Family only */ const DWORD cpuDefault = 0xFFFFFFFF; static ConfigDWORD cpuFamily; DWORD configCpuFamily = cpuFamily.val(CLRConfig::INTERNAL_CPUFamily); if (configCpuFamily != cpuDefault) { assert((configCpuFamily & 0xFFF) == configCpuFamily); tempVal.dwCPUType = (tempVal.dwCPUType & 0xFFFF0000) | configCpuFamily; } #endif tempVal.dwFeatures = GetSpecificCpuFeaturesAsm(&tempVal.dwExtendedFeatures); // written in ASM & doesn't participate in contracts #ifdef _DEBUG /* Set the 32-bit feature mask */ const DWORD cpuFeaturesDefault
= 0xFFFFFFFF; static ConfigDWORD cpuFeatures; DWORD configCpuFeatures = cpuFeatures.val(CLRConfig::INTERNAL_CPUFeatures); if (configCpuFeatures != cpuFeaturesDefault) { tempVal.dwFeatures = configCpuFeatures; } #endif val = *cpuInfo = tempVal; } #endif // #ifndef DACCESS_COMPILE #ifndef FEATURE_EH_FUNCLETS //--------------------------------------------------------------------------------------- // // Initialize the EHContext using the resume PC and the REGDISPLAY. The EHContext is currently used in two // scenarios: to store the register state before calling an EH clause, and to retrieve the ambient SP of a // particular stack frame. resumePC means different things in the two scenarios. In the former case, it // is the IP at which we are going to resume execution when we call an EH clause. In the latter case, it // is just the current IP. // // Arguments: // resumePC - refer to the comment above // regs - This is the REGDISPLAY obtained from the CrawlFrame used in the stackwalk. It represents the // stack frame of the method containing the EH clause we are about to call. For getting the // ambient SP, this is the stack frame we are interested in. // void EHContext::Setup(PCODE resumePC, PREGDISPLAY regs) { LIMITED_METHOD_DAC_CONTRACT; // EAX ECX EDX are scratch this->Esp = regs->SP; this->Ebx = *regs->pEbx; this->Esi = *regs->pEsi; this->Edi = *regs->pEdi; this->Ebp = *regs->pEbp; this->Eip = (ULONG)(size_t)resumePC; } // // Update the registers using new context // // This is necessary to reflect GC pointer changes during the middle of a unwind inside a // finally clause, because: // 1. GC won't see the part of stack inside try (which has thrown an exception) that is already // unwinded and thus GC won't update GC pointers for this portion of the stack, but rather the // call stack in finally. // 2. upon return of finally, the unwind process continues and unwinds stack based on the part // of stack inside try and won't see the updated values in finally. // As a result, we need to manually update the context using register values upon return of finally // // Note that we only update the registers for finally clause because // 1. For filter handlers, stack walker is able to see the whole stack (including the try part) // with the help of ExceptionFilterFrame as filter handlers are called in first pass // 2. For catch handlers, the current unwinding is already finished // void EHContext::UpdateFrame(PREGDISPLAY regs) { LIMITED_METHOD_CONTRACT; // EAX ECX EDX are scratch. 
// No need to update ESP as unwinder takes care of that for us LOG((LF_EH, LL_INFO1000, "Updating saved EBX: *%p= %p\n", regs->pEbx, this->Ebx)); LOG((LF_EH, LL_INFO1000, "Updating saved ESI: *%p= %p\n", regs->pEsi, this->Esi)); LOG((LF_EH, LL_INFO1000, "Updating saved EDI: *%p= %p\n", regs->pEdi, this->Edi)); LOG((LF_EH, LL_INFO1000, "Updating saved EBP: *%p= %p\n", regs->pEbp, this->Ebp)); *regs->pEbx = this->Ebx; *regs->pEsi = this->Esi; *regs->pEdi = this->Edi; *regs->pEbp = this->Ebp; } #endif // FEATURE_EH_FUNCLETS void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); MethodDesc * pFunc = GetFunction(); _ASSERTE(pFunc != NULL); UpdateRegDisplayHelper(pRD, pFunc->CbStackPop()); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } void TransitionFrame::UpdateRegDisplayHelper(const PREGDISPLAY pRD, UINT cbStackPop) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; CalleeSavedRegisters* regs = GetCalleeSavedRegisters(); pRD->PCTAddr = GetReturnAddressPtr(); #ifdef FEATURE_EH_FUNCLETS DWORD CallerSP = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr);; pRD->pCurrentContext->Esp = CallerSP; UpdateRegDisplayFromCalleeSavedRegisters(pRD, regs); ClearRegDisplayArgumentAndScratchRegisters(pRD); SyncRegDisplayToCurrentContext(pRD); #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; #define CALLEE_SAVED_REGISTER(regname) pRD->p##regname = (DWORD*) &regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP = (DWORD)(pRD->PCTAddr + sizeof(TADDR) + cbStackPop); #endif // FEATURE_EH_FUNCLETS RETURN; } void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; PRECONDITION(m_MachState.isValid()); // InsureInit has been called SUPPORTS_DAC; } CONTRACT_END; ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState.GetRetAddr(), m_MachState.esp())); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. #ifdef DACCESS_COMPILE // For DAC, we may get here when the HMF is still uninitialized. // So we may need to unwind here. if (!m_MachState.isValid()) { // This allocation throws on OOM. 
MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true); InsureInit(false, pUnwoundState); pRD->PCTAddr = dac_cast<TADDR>(pUnwoundState->pRetAddr()); pRD->pCurrentContext->Eip = pRD->ControlPC = pUnwoundState->GetRetAddr(); pRD->pCurrentContext->Esp = pRD->SP = pUnwoundState->esp(); // Do not use pUnwoundState->p##regname() here because it returns NULL in this case pRD->pCurrentContext->Edi = pUnwoundState->_edi; pRD->pCurrentContext->Esi = pUnwoundState->_esi; pRD->pCurrentContext->Ebx = pUnwoundState->_ebx; pRD->pCurrentContext->Ebp = pUnwoundState->_ebp; #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = (DWORD*) pUnwoundState->p##regname(); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER ClearRegDisplayArgumentAndScratchRegisters(pRD); return; } #endif // DACCESS_COMPILE pRD->PCTAddr = dac_cast<TADDR>(m_MachState.pRetAddr()); pRD->pCurrentContext->Eip = pRD->ControlPC = m_MachState.GetRetAddr(); pRD->pCurrentContext->Esp = pRD->SP = (DWORD) m_MachState.esp(); #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = *((DWORD*) m_MachState.p##regname()); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = (DWORD*) m_MachState.p##regname(); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER // // Clear all knowledge of scratch registers. We're skipping to any // arbitrary point on the stack, and frames aren't required to preserve or // keep track of these anyway. // ClearRegDisplayArgumentAndScratchRegisters(pRD); #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; #ifdef DACCESS_COMPILE // // In the dac case we may have gotten here // without the frame being initialized, so // try to initialize on the fly. // if (!m_MachState.isValid()) { MachState unwindState; InsureInit(false, &unwindState); pRD->PCTAddr = dac_cast<TADDR>(unwindState.pRetAddr()); pRD->ControlPC = unwindState.GetRetAddr(); pRD->SP = unwindState._esp; // Get some special host instance memory // so we have a place to point to. // This host memory has no target address // and so won't be looked up or used for // anything else. MachState* thisState = (MachState*) DacAllocHostOnlyInstance(sizeof(*thisState), true); thisState->_edi = unwindState._edi; pRD->pEdi = (DWORD *)&thisState->_edi; thisState->_esi = unwindState._esi; pRD->pEsi = (DWORD *)&thisState->_esi; thisState->_ebx = unwindState._ebx; pRD->pEbx = (DWORD *)&thisState->_ebx; thisState->_ebp = unwindState._ebp; pRD->pEbp = (DWORD *)&thisState->_ebp; // InsureInit always sets m_RegArgs to zero // in the real code. I'm not sure exactly // what should happen in the on-the-fly case, // but go with what would happen from an InsureInit. RETURN; } #endif // #ifdef DACCESS_COMPILE // DACCESS: The MachState pointers are kept as PTR_TADDR so // the host pointers here refer to the appropriate size and // these casts are not a problem.
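// Note for the assignments below: the pE** fields point at the MachState's // captured callee-saved values, PCTAddr is the address of the slot holding // the return address, ControlPC is the value read from that slot, and SP is // the captured ESP.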
pRD->pEdi = (DWORD*) m_MachState.pEdi(); pRD->pEsi = (DWORD*) m_MachState.pEsi(); pRD->pEbx = (DWORD*) m_MachState.pEbx(); pRD->pEbp = (DWORD*) m_MachState.pEbp(); pRD->PCTAddr = dac_cast<TADDR>(m_MachState.pRetAddr()); pRD->ControlPC = m_MachState.GetRetAddr(); pRD->SP = (DWORD) m_MachState.esp(); #endif // FEATURE_EH_FUNCLETS RETURN; } #ifdef _DEBUG_IMPL // Confirm that if the machine state was not initialized, then // any unspilled callee saved registers did not change EXTERN_C MachState* STDCALL HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; DEBUG_ONLY; } CONTRACTL_END; MachState* state = frame->MachineState(); // if we've already executed this check once for this helper method frame then // we don't do the check again because it is very expensive. if (frame->HaveDoneConfirmStateCheck()) { return state; } // probe to avoid a kazillion violations in the code that follows. BEGIN_DEBUG_ONLY_CODE; if (!state->isValid()) { frame->InsureInit(false, NULL); _ASSERTE(state->_pEsi != &state->_esi || state->_esi == (TADDR)esiVal); _ASSERTE(state->_pEdi != &state->_edi || state->_edi == (TADDR)ediVal); _ASSERTE(state->_pEbx != &state->_ebx || state->_ebx == (TADDR)ebxVal); _ASSERTE(state->_pEbp != &state->_ebp || state->_ebp == (TADDR)ebpVal); } END_DEBUG_ONLY_CODE; // set that we have executed this check once for this helper method frame. frame->SetHaveDoneConfirmStateCheck(); return state; } #endif void ExternalMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; UpdateRegDisplayHelper(pRD, CbStackPopUsingGCRefMap(GetGCRefMap())); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK ExternalMethodFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } void StubDispatchFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; PTR_BYTE pGCRefMap = GetGCRefMap(); if (pGCRefMap != NULL) { UpdateRegDisplayHelper(pRD, CbStackPopUsingGCRefMap(pGCRefMap)); } else if (GetFunction() != NULL) { FramedMethodFrame::UpdateRegDisplay(pRD); } else { UpdateRegDisplayHelper(pRD, 0); // If we do not have an owning MethodDesc, we need to pretend that // the call happened on the call instruction to get the ESP unwound properly.
// // This path is hit when we are throwing a null reference exception from // code:VSD_ResolveWorker or code:StubDispatchFixupWorker pRD->ControlPC = GetAdjustedCallAddress(pRD->ControlPC); } LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK StubDispatchFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } PCODE StubDispatchFrame::GetReturnAddress() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PCODE retAddress = FramedMethodFrame::GetReturnAddress(); if (GetFunction() == NULL && GetGCRefMap() == NULL) { // See comment in code:StubDispatchFrame::UpdateRegDisplay retAddress = GetAdjustedCallAddress(retAddress); } return retAddress; } void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; pRD->PCTAddr = GetReturnAddressPtr(); #ifdef FEATURE_EH_FUNCLETS memcpy(pRD->pCurrentContext, &m_ctx, sizeof(CONTEXT)); pRD->SP = m_ctx.Esp; pRD->ControlPC = m_ctx.Eip; #define ARGUMENT_AND_SCRATCH_REGISTER(regname) pRD->pCurrentContextPointers->regname = &m_ctx.regname; ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = &m_ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; CalleeSavedRegisters* regs = GetCalleeSavedRegisters(); #define CALLEE_SAVED_REGISTER(regname) pRD->p##regname = (DWORD*) &regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->SP = m_Esp; pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); #endif // FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK FaultingExceptionFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; // We should skip over InlinedCallFrame if it is not active. // It will be part of a JITed method's frame, and the stack-walker // can handle such a case. #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; // @TODO: Remove this after the debugger is fixed to avoid stack-walks from bad places // @TODO: This may still be needed for sampling profilers if (!InlinedCallFrame::FrameHasActiveCall(this)) { LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this)); return; } DWORD stackArgSize = 0; #if !defined(UNIX_X86_ABI) stackArgSize = (DWORD) dac_cast<TADDR>(m_Datum); if (stackArgSize & ~0xFFFF) { NDirectMethodDesc * pMD = PTR_NDirectMethodDesc(m_Datum); /* if this is not an NDirect frame, something is really wrong */ _ASSERTE(pMD->SanityCheck() && pMD->IsNDirect()); stackArgSize = pMD->GetStackArgumentSize(); } #endif /* The return address is just above the "ESP" */ pRD->PCTAddr = PTR_HOST_MEMBER_TADDR(InlinedCallFrame, this, m_pCallerReturnAddress); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
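// Field layout assumed by the assignments below (as the surrounding code // uses these members): m_pCallerReturnAddress holds the return address of // the inlined PInvoke call, m_pCallSiteSP is ESP at the call site, and // m_pCalleeSavedFP is the caller's saved EBP, the only callee-saved // register this frame tracks.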
pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr); pRD->pCurrentContext->Esp = (DWORD) dac_cast<TADDR>(m_pCallSiteSP); pRD->pCurrentContext->Ebp = (DWORD) m_pCalleeSavedFP; ClearRegDisplayArgumentAndScratchRegisters(pRD); #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = NULL; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->pCurrentContextPointers->Ebp = (DWORD*) &m_pCalleeSavedFP; SyncRegDisplayToCurrentContext(pRD); #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; pRD->pEbp = (DWORD*) &m_pCalleeSavedFP; pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); /* Now we need to pop off the outgoing arguments */ pRD->SP = (DWORD) dac_cast<TADDR>(m_pCallSiteSP) + stackArgSize; #endif // FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK InlinedCallFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } #ifdef FEATURE_HIJACK //========================== // Resumable Exception Frame // TADDR ResumableFrame::GetReturnAddressPtr() { LIMITED_METHOD_DAC_CONTRACT; return dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Eip); } void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; pRD->PCTAddr = dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Eip); #ifdef FEATURE_EH_FUNCLETS CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT)); pRD->SP = m_Regs->Esp; pRD->ControlPC = m_Regs->Eip; #define ARGUMENT_AND_SCRATCH_REGISTER(reg) pRD->pCurrentContextPointers->reg = &m_Regs->reg; ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #define CALLEE_SAVED_REGISTER(reg) pRD->pCurrentContextPointers->reg = &m_Regs->reg; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; CONTEXT* pUnwoundContext = m_Regs; #if !defined(DACCESS_COMPILE) // "pContextForUnwind" field is only used on X86 since not only is it initialized just for it, // but it's used only under the confines of the STACKWALKER_MAY_POP_FRAMES preprocessor define, // which is defined for x86 only (refer to its definition in stackwalk.cpp).
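// The pContextForUnwind copy below includes the scratch registers // (EAX/ECX/EDX) as well as the callee-saved ones, since a stackwalk that may // pop frames needs the complete register state of this resumable context.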
if (pRD->pContextForUnwind != NULL) { pUnwoundContext = pRD->pContextForUnwind; pUnwoundContext->Eax = m_Regs->Eax; pUnwoundContext->Ecx = m_Regs->Ecx; pUnwoundContext->Edx = m_Regs->Edx; pUnwoundContext->Edi = m_Regs->Edi; pUnwoundContext->Esi = m_Regs->Esi; pUnwoundContext->Ebx = m_Regs->Ebx; pUnwoundContext->Ebp = m_Regs->Ebp; pUnwoundContext->Eip = m_Regs->Eip; } #endif // !defined(DACCESS_COMPILE) pRD->pEax = &pUnwoundContext->Eax; pRD->pEcx = &pUnwoundContext->Ecx; pRD->pEdx = &pUnwoundContext->Edx; pRD->pEdi = &pUnwoundContext->Edi; pRD->pEsi = &pUnwoundContext->Esi; pRD->pEbx = &pUnwoundContext->Ebx; pRD->pEbp = &pUnwoundContext->Ebp; pRD->ControlPC = pUnwoundContext->Eip; pRD->SP = m_Regs->Esp; #endif // !FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK ResumableFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } // The HijackFrame has to know the registers that are pushed by OnHijackTripThread // -> HijackFrame::UpdateRegDisplay should restore all the registers pushed by OnHijackTripThread void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; pRD->PCTAddr = dac_cast<TADDR>(m_Args) + offsetof(HijackArgs, Eip); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr); pRD->pCurrentContext->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); #define RESTORE_REG(reg) { pRD->pCurrentContext->reg = m_Args->reg; pRD->pCurrentContextPointers->reg = &m_Args->reg; } #define CALLEE_SAVED_REGISTER(reg) RESTORE_REG(reg) ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define ARGUMENT_AND_SCRATCH_REGISTER(reg) RESTORE_REG(reg) ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #undef RESTORE_REG SyncRegDisplayToCurrentContext(pRD); #else // FEATURE_EH_FUNCLETS // This only describes the top-most frame pRD->pContext = NULL; #define RESTORE_REG(reg) { pRD->p##reg = &m_Args->reg; } #define CALLEE_SAVED_REGISTER(reg) RESTORE_REG(reg) ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define ARGUMENT_AND_SCRATCH_REGISTER(reg) RESTORE_REG(reg) ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #undef RESTORE_REG pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); #endif // FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HijackFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); } #endif // FEATURE_HIJACK void PInvokeCalliFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; VASigCookie *pVASigCookie = GetVASigCookie(); UpdateRegDisplayHelper(pRD, pVASigCookie->sizeOfArgs+sizeof(int)); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK PInvokeCalliFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } #ifndef UNIX_X86_ABI void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; pRD->PCTAddr = GetReturnAddressPtr(); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr); pRD->pCurrentContext->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); UpdateRegDisplayFromCalleeSavedRegisters(pRD, &m_regs); ClearRegDisplayArgumentAndScratchRegisters(pRD); SyncRegDisplayToCurrentContext(pRD); #else // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; #define CALLEE_SAVED_REGISTER(regname) pRD->p##regname = (DWORD*) &m_regs.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); #endif LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TailCallFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } #endif // !UNIX_X86_ABI #ifdef FEATURE_READYTORUN void DynamicHelperFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { WRAPPER_NO_CONTRACT; UpdateRegDisplayHelper(pRD, 0); } #endif // FEATURE_READYTORUN //------------------------------------------------------------------------ // This is declared as returning WORD instead of PRD_TYPE because of // header issues with cgencpu.h including dbginterface.h. WORD GetUnpatchedCodeData(LPCBYTE pAddr) { #ifndef TARGET_X86 #error Make sure this works before porting to platforms other than x86. #endif CONTRACT(WORD) { NOTHROW; GC_NOTRIGGER; PRECONDITION(CORDebuggerAttached()); PRECONDITION(CheckPointer(pAddr)); } CONTRACT_END; // Ordering is because x86 is little-endian. BYTE bLow = pAddr[0]; BYTE bHigh = pAddr[1]; #ifndef DACCESS_COMPILE // Need to make sure that the code we're reading is free of breakpoint patches. PRD_TYPE unpatchedOpcode; if (g_pDebugInterface->CheckGetPatchedOpcode((CORDB_ADDRESS_TYPE *)pAddr, &unpatchedOpcode)) { // PRD_TYPE is supposed to be an opaque debugger structure representing data to remove a patch. // Although PRD_TYPE is currently typedef'ed to be a DWORD_PTR, it's actually semantically just a BYTE. // (since a patch on x86 is just a 0xCC instruction). // Ideally, the debugger subsystem would expose a patch-code stripper that returns BYTE/WORD/etc, and // not force us to crack it ourselves here.
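// Worked example of the little-endian combine done below: for the two code // bytes { 0x8B, 0xFF } ("mov edi, edi"), bLow == 0x8B and bHigh == 0xFF, so // w == 0x8B + (0xFF << 8) == 0xFF8B.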
bLow = (BYTE) unpatchedOpcode; } // #endif WORD w = bLow + (bHigh << 8); RETURN w; } #ifndef DACCESS_COMPILE Stub *GenerateInitPInvokeFrameHelper() { CONTRACT(Stub*) { STANDARD_VM_CHECK; POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; CPUSTUBLINKER sl; CPUSTUBLINKER *psl = &sl; CORINFO_EE_INFO::InlinedCallFrameInfo FrameInfo; InlinedCallFrame::GetEEInfo(&FrameInfo); // EDI contains address of the frame on stack (the frame ptr, not its negspace) unsigned negSpace = FrameInfo.offsetOfFrameVptr; // mov esi, GetThread() psl->X86EmitCurrentThreadFetch(kESI, (1 << kEDI) | (1 << kEBX) | (1 << kECX) | (1 << kEDX)); // mov [edi + FrameInfo.offsetOfGSCookie], GetProcessGSCookie() psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfGSCookie - negSpace); psl->Emit32(GetProcessGSCookie()); // mov [edi + FrameInfo.offsetOfFrameVptr], InlinedCallFrame::GetFrameVtable() psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfFrameVptr - negSpace); psl->Emit32(InlinedCallFrame::GetMethodFrameVPtr()); // mov eax, [esi + offsetof(Thread, m_pFrame)] // mov [edi + FrameInfo.offsetOfFrameLink], eax psl->X86EmitIndexRegLoad(kEAX, kESI, offsetof(Thread, m_pFrame)); psl->X86EmitIndexRegStore(kEDI, FrameInfo.offsetOfFrameLink - negSpace, kEAX); // mov [edi + FrameInfo.offsetOfCalleeSavedEbp], ebp psl->X86EmitIndexRegStore(kEDI, FrameInfo.offsetOfCalleeSavedFP - negSpace, kEBP); // mov [edi + FrameInfo.offsetOfReturnAddress], 0 psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfReturnAddress - negSpace); psl->Emit32(0); // mov [esi + offsetof(Thread, m_pFrame)], edi psl->X86EmitIndexRegStore(kESI, offsetof(Thread, m_pFrame), kEDI); // leave current Thread in ESI psl->X86EmitReturn(0); // A single process-wide stub that will never unload RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()); } extern "C" VOID STDCALL StubRareEnableWorker(Thread *pThread) { WRAPPER_NO_CONTRACT; //printf("RareEnable\n"); pThread->RareEnablePreemptiveGC(); } // Disable when calling into managed code from a place that fails via Exceptions extern "C" VOID STDCALL StubRareDisableTHROWWorker(Thread *pThread) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; // Do not add a CONTRACT here. We haven't set up SEH. // WARNING!!!! // when we start executing here, we are actually in cooperative mode. But we // haven't synchronized with the barrier to reentry yet. So we are in a highly // dangerous mode. If we call managed code, we will potentially be active in // the GC heap, even as GCs are occurring! // We must do the following in this order, because otherwise we would be constructing // the exception for the abort without synchronizing with the GC. Also, we have no // CLR SEH set up, despite the fact that we may throw a ThreadAbortException. pThread->RareDisablePreemptiveGC(); pThread->HandleThreadAbort(); } ////////////////////////////////////////////////////////////////////////////// // // JITInterface // ////////////////////////////////////////////////////////////////////////////// /*********************************************************************/ #ifdef EnC_SUPPORTED #pragma warning (disable : 4731) void ResumeAtJit(PCONTEXT pContext, LPVOID oldESP) { // No CONTRACT here, because we can't run the risk of it pushing any SEH into the // current method.
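// Rough sketch of the resume trick used below: the target EIP is stored at // (targetESP - 4), every other register is restored from the CONTEXT, and the // final "ret" then both jumps to targetEIP and leaves ESP == targetESP.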
STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; #ifdef _DEBUG DWORD curESP; __asm mov curESP, esp #endif if (oldESP) { _ASSERTE(curESP < (DWORD)(size_t)oldESP); // should have popped the SEH records by now as stack has been overwritten _ASSERTE(GetCurrentSEHRecord() > oldESP); } // For the "push Eip, ..., ret" _ASSERTE(curESP < pContext->Esp - sizeof(DWORD)); pContext->Esp -= sizeof(DWORD); __asm { mov ebp, pContext // Push Eip onto the targetESP, so that the final "ret" will consume it mov ecx, [ebp]CONTEXT.Esp mov edx, [ebp]CONTEXT.Eip mov [ecx], edx // Restore all registers except Esp, Ebp, Eip mov eax, [ebp]CONTEXT.Eax mov ebx, [ebp]CONTEXT.Ebx mov ecx, [ebp]CONTEXT.Ecx mov edx, [ebp]CONTEXT.Edx mov esi, [ebp]CONTEXT.Esi mov edi, [ebp]CONTEXT.Edi push [ebp]CONTEXT.Esp // pContext->Esp is (targetESP-sizeof(DWORD)) push [ebp]CONTEXT.Ebp pop ebp pop esp // esp is (targetESP-sizeof(DWORD)), and [esp] is the targetEIP. // The ret will set eip to targetEIP and esp will be automatically // incremented to targetESP ret } } #pragma warning (default : 4731) #endif // !EnC_SUPPORTED #ifndef TARGET_UNIX #pragma warning(push) #pragma warning(disable: 4035) extern "C" DWORD __stdcall xmmYmmStateSupport() { // No CONTRACT STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; __asm { mov ecx, 0 ; Specify xcr0 xgetbv ; result in EDX:EAX and eax, 06H cmp eax, 06H ; check OS has enabled both XMM and YMM state support jne not_supported mov eax, 1 jmp done not_supported: mov eax, 0 done: } } #pragma warning(pop) #else // !TARGET_UNIX void __cpuid(int cpuInfo[4], int function_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid" : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id) ); } void __cpuidex(int cpuInfo[4], int function_id, int subFunction_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid" : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id), "2"(subFunction_id) ); } extern "C" DWORD __stdcall xmmYmmStateSupport() { DWORD eax; __asm(" xgetbv\n" \ : "=a"(eax) /*output in eax*/\ : "c"(0) /*inputs - 0 in ecx*/\ : "edx" /* registers that are clobbered*/ ); // check OS has enabled both XMM and YMM state support return ((eax & 0x06) == 0x06) ? 
1 : 0; } #endif // !TARGET_UNIX void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam) { LIMITED_METHOD_CONTRACT; #ifdef _DEBUG m_alignpad[0] = X86_INSTR_INT3; m_alignpad[1] = X86_INSTR_INT3; #endif // _DEBUG m_movEAX = X86_INSTR_MOV_EAX_IMM32; m_uet = pvSecretParam; m_jmp = X86_INSTR_JMP_REL32; m_execstub = (BYTE*) ((pTargetCode) - (4+((BYTE*)&pEntryThunkCodeRX->m_execstub))); FlushInstructionCache(GetCurrentProcess(),pEntryThunkCodeRX->GetEntryPoint(),sizeof(UMEntryThunkCode)); } void UMEntryThunkCode::Poison() { LIMITED_METHOD_CONTRACT; ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode)); UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW(); pThisRW->m_execstub = (BYTE*) ((BYTE*)UMEntryThunk::ReportViolation - (4+((BYTE*)&m_execstub))); // mov ecx, imm32 pThisRW->m_movEAX = 0xb9; ClrFlushInstructionCache(GetEntryPoint(),sizeof(UMEntryThunkCode)); } UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback) { LIMITED_METHOD_CONTRACT; if (*((BYTE*)pCallback) != X86_INSTR_MOV_EAX_IMM32 || ( ((size_t)pCallback) & 3) != 2) { return NULL; } return *(UMEntryThunk**)( 1 + (BYTE*)pCallback ); } BOOL DoesSlotCallPrestub(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(pCode != NULL); PRECONDITION(pCode != GetPreStubEntryPoint()); } CONTRACTL_END; // x86 has the following possible sequences for prestub logic: // 1. slot -> temporary entrypoint -> prestub // 2. slot -> precode -> prestub // 3. slot -> precode -> jumprel32 (NGEN case) -> prestub #ifdef HAS_COMPACT_ENTRYPOINTS if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL) { return TRUE; } #endif // HAS_COMPACT_ENTRYPOINTS if (!IS_ALIGNED(pCode, PRECODE_ALIGNMENT)) { return FALSE; } #ifdef HAS_FIXUP_PRECODE if (*PTR_BYTE(pCode) == X86_INSTR_CALL_REL32) { // Note that call could have been patched to jmp in the meantime pCode = rel32Decode(pCode+1); // NGEN case if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) { pCode = rel32Decode(pCode+1); } return pCode == (TADDR)PrecodeFixupThunk; } #endif if (*PTR_BYTE(pCode) != X86_INSTR_MOV_EAX_IMM32 || *PTR_BYTE(pCode+5) != X86_INSTR_MOV_RM_R || *PTR_BYTE(pCode+7) != X86_INSTR_JMP_REL32) { return FALSE; } pCode = rel32Decode(pCode+8); // NGEN case if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) { pCode = rel32Decode(pCode+1); } return pCode == GetPreStubEntryPoint(); } #ifdef FEATURE_READYTORUN // // Allocation of dynamic helpers // #define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR) #define BEGIN_DYNAMIC_HELPER_EMIT(size) \ SIZE_T cb = size; \ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \ BYTE * pStart = startWriterHolder.GetRW(); \ size_t rxOffset = pStartRX - pStart; \ BYTE * p = pStart; #define END_DYNAMIC_HELPER_EMIT() \ _ASSERTE(pStart + cb == p); \ while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \ ClrFlushInstructionCache(pStartRX, cbAligned); \ return (PCODE)pStartRX PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { STANDARD_VM_CONTRACT; BEGIN_DYNAMIC_HELPER_EMIT(10); *p++ = 0xB9; // mov ecx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; END_DYNAMIC_HELPER_EMIT(); } void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t 
rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target) { CONTRACTL { GC_NOTRIGGER; PRECONDITION(p != NULL && target != NULL); } CONTRACTL_END; // Move an argument into the second argument register and jump to a target function. *p++ = 0xBA; // mov edx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; } PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(10); EmitHelperWithArg(p, rxOffset, pAllocator, arg, target); END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(15); *p++ = 0xB9; // mov ecx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = 0xBA; // mov edx, XXXXXX *(INT32 *)p = (INT32)arg2; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(12); *(UINT16 *)p = 0xD18B; // mov edx, ecx p += 2; *p++ = 0xB9; // mov ecx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator) { BEGIN_DYNAMIC_HELPER_EMIT(1); *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg) { BEGIN_DYNAMIC_HELPER_EMIT(6); *p++ = 0xB8; // mov eax, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset) { BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ?
9 : 6); *p++ = 0xA1; // mov eax, [XXXXXX] *(INT32 *)p = (INT32)arg; p += 4; if (offset != 0) { // add eax, <offset> *p++ = 0x83; *p++ = 0xC0; *p++ = offset; } *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } EXTERN_C VOID DynamicHelperArgsStub(); PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { #ifdef UNIX_X86_ABI BEGIN_DYNAMIC_HELPER_EMIT(18); #else BEGIN_DYNAMIC_HELPER_EMIT(12); #endif #ifdef UNIX_X86_ABI // sub esp, 8 *p++ = 0x83; *p++ = 0xec; *p++ = 0x8; #else // pop eax *p++ = 0x58; #endif // push arg *p++ = 0x68; *(INT32 *)p = arg; p += 4; #ifdef UNIX_X86_ABI // mov eax, target *p++ = 0xB8; *(INT32 *)p = target; p += 4; #else // push eax *p++ = 0x50; #endif *p++ = X86_INSTR_JMP_REL32; // jmp rel32 #ifdef UNIX_X86_ABI *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), (PCODE)DynamicHelperArgsStub); #else *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); #endif p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { #ifdef UNIX_X86_ABI BEGIN_DYNAMIC_HELPER_EMIT(23); #else BEGIN_DYNAMIC_HELPER_EMIT(17); #endif #ifdef UNIX_X86_ABI // sub esp, 4 *p++ = 0x83; *p++ = 0xec; *p++ = 0x4; #else // pop eax *p++ = 0x58; #endif // push arg *p++ = 0x68; *(INT32 *)p = arg; p += 4; // push arg2 *p++ = 0x68; *(INT32 *)p = arg2; p += 4; #ifdef UNIX_X86_ABI // mov eax, target *p++ = 0xB8; *(INT32 *)p = target; p += 4; #else // push eax *p++ = 0x50; #endif *p++ = X86_INSTR_JMP_REL32; // jmp rel32 #ifdef UNIX_X86_ABI *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), (PCODE)DynamicHelperArgsStub); #else *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); #endif p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule) { STANDARD_VM_CONTRACT; PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ? GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) : GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule)); GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT); ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs)); argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot; argsWriterHolder.GetRW()->signature = pLookup->signature; argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule; WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*); // It's available only via the run-time helper function if (pLookup->indirections == CORINFO_USEHELPER) { BEGIN_DYNAMIC_HELPER_EMIT(10); // ecx contains the generic context parameter // mov edx,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); END_DYNAMIC_HELPER_EMIT(); } else { int indirectionsSize = 0; for (WORD i = 0; i < pLookup->indirections; i++) indirectionsSize += (pLookup->offsets[i] >= 0x80 ? 6 : 3); int codeSize = indirectionsSize + (pLookup->testForNull ? 15 : 1) + (pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK ? 
12 : 0); BEGIN_DYNAMIC_HELPER_EMIT(codeSize); BYTE* pJLECall = NULL; for (WORD i = 0; i < pLookup->indirections; i++) { if (i == pLookup->indirections - 1 && pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { _ASSERTE(pLookup->testForNull && i > 0); // cmp dword ptr[eax + sizeOffset],slotOffset *(UINT16*)p = 0xb881; p += 2; *(UINT32*)p = (UINT32)pLookup->sizeOffset; p += 4; *(UINT32*)p = (UINT32)slotOffset; p += 4; // jle 'HELPER_CALL' *p++ = 0x7e; pJLECall = p++; // Offset filled later } // Move from ecx if it's the first indirection, otherwise from eax // mov eax,dword ptr [ecx|eax + offset] if (pLookup->offsets[i] >= 0x80) { *(UINT16*)p = (i == 0 ? 0x818b : 0x808b); p += 2; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT16*)p = (i == 0 ? 0x418b : 0x408b); p += 2; *p++ = (BYTE)pLookup->offsets[i]; } } // No null test required if (!pLookup->testForNull) { _ASSERTE(pLookup->sizeOffset == CORINFO_NO_SIZE_CHECK); // No fixups needed for R2R *p++ = 0xC3; // ret } else { // eax contains the value of the dictionary slot entry _ASSERTE(pLookup->indirections != 0); // test eax,eax *(UINT16*)p = 0xc085; p += 2; // je 'HELPER_CALL' (a jump of 1 byte) *(UINT16*)p = 0x0174; p += 2; *p++ = 0xC3; // ret // 'HELPER_CALL' { if (pJLECall != NULL) *pJLECall = (BYTE)(p - pJLECall - 1); // ecx already contains the generic context parameter // mov edx,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); } } END_DYNAMIC_HELPER_EMIT(); } } #endif // FEATURE_READYTORUN #endif // DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // CGENX86.CPP - // // Various helper routines for generating x86 assembly code. // // // Precompiled Header #include "common.h" #include "field.h" #include "stublink.h" #include "cgensys.h" #include "frames.h" #include "excep.h" #include "dllimport.h" #include "comdelegate.h" #include "log.h" #include "comdelegate.h" #include "array.h" #include "jitinterface.h" #include "codeman.h" #include "dbginterface.h" #include "eeprofinterfaces.h" #include "eeconfig.h" #include "asmconstants.h" #include "class.h" #include "virtualcallstub.h" #include "jitinterface.h" #ifdef FEATURE_COMINTEROP #include "comtoclrcall.h" #include "runtimecallablewrapper.h" #include "comcache.h" #include "olevariant.h" #endif // FEATURE_COMINTEROP #include "stublink.inl" extern "C" DWORD STDCALL GetSpecificCpuTypeAsm(void); extern "C" uint32_t STDCALL GetSpecificCpuFeaturesAsm(uint32_t *pInfo); // NOTE on Frame Size C_ASSERT usage in this file // if the frame size changes then the stubs have to be revisited for correctness // kindly revist the logic and then update the constants so that the C_ASSERT will again fire // if someone changes the frame size. You are expected to keep this hard coded constant // up to date so that changes in the frame size trigger errors at compile time if the code is not altered void generate_noref_copy (unsigned nbytes, StubLinkerCPU* sl); #ifdef FEATURE_EH_FUNCLETS void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * regs) { LIMITED_METHOD_CONTRACT; T_CONTEXT * pContext = pRD->pCurrentContext; #define CALLEE_SAVED_REGISTER(regname) pContext->regname = regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers; #define CALLEE_SAVED_REGISTER(regname) pContextPointers->regname = (DWORD*)&regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER } void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD) { LIMITED_METHOD_CONTRACT; #define ARGUMENT_AND_SCRATCH_REGISTER(regname) pRD->pCurrentContextPointers->regname = NULL; ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER } #endif // FEATURE_EH_FUNCLETS #ifndef DACCESS_COMPILE //--------------------------------------------------------------- // Returns the type of CPU (the value of x of x86) // (Please note, that it returns 6 for P5-II) //--------------------------------------------------------------- void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo) { LIMITED_METHOD_CONTRACT; static CORINFO_CPU val = { 0, 0, 0 }; if (val.dwCPUType) { *cpuInfo = val; return; } CORINFO_CPU tempVal; tempVal.dwCPUType = GetSpecificCpuTypeAsm(); // written in ASM & doesn't participate in contracts _ASSERTE(tempVal.dwCPUType); #ifdef _DEBUG /* Set Family+Model+Stepping string (eg., x690 for Banias, or xF30 for P4 Prescott) * instead of Family only */ const DWORD cpuDefault = 0xFFFFFFFF; static ConfigDWORD cpuFamily; DWORD configCpuFamily = cpuFamily.val(CLRConfig::INTERNAL_CPUFamily); if (configCpuFamily != cpuDefault) { assert((configCpuFamily & 0xFFF) == configCpuFamily); tempVal.dwCPUType = (tempVal.dwCPUType & 0xFFFF0000) | configCpuFamily; } #endif tempVal.dwFeatures = GetSpecificCpuFeaturesAsm(&tempVal.dwExtendedFeatures); // written in ASM & doesn't participate in contracts #ifdef _DEBUG /* Set the 32-bit feature mask */ const DWORD cpuFeaturesDefault 
= 0xFFFFFFFF; static ConfigDWORD cpuFeatures; DWORD configCpuFeatures = cpuFeatures.val(CLRConfig::INTERNAL_CPUFeatures); if (configCpuFeatures != cpuFeaturesDefault) { tempVal.dwFeatures = configCpuFeatures; } #endif val = *cpuInfo = tempVal; } #endif // #ifndef DACCESS_COMPILE #ifndef FEATURE_EH_FUNCLETS //--------------------------------------------------------------------------------------- // // Initialize the EHContext using the resume PC and the REGDISPLAY. The EHContext is currently used in two // scenarios: to store the register state before calling an EH clause, and to retrieve the ambient SP of a // particular stack frame. resumePC means different things in the two scenarios. In the former case, it // is the IP at which we are going to resume execution when we call an EH clause. In the latter case, it // is just the current IP. // // Arguments: // resumePC - refer to the comment above // regs - This is the REGDISPLAY obtained from the CrawlFrame used in the stackwalk. It represents the // stack frame of the method containing the EH clause we are about to call. For getting the // ambient SP, this is the stack frame we are interested in. // void EHContext::Setup(PCODE resumePC, PREGDISPLAY regs) { LIMITED_METHOD_DAC_CONTRACT; // EAX ECX EDX are scratch this->Esp = regs->SP; this->Ebx = *regs->pEbx; this->Esi = *regs->pEsi; this->Edi = *regs->pEdi; this->Ebp = *regs->pEbp; this->Eip = (ULONG)(size_t)resumePC; } // // Update the registers using new context // // This is necessary to reflect GC pointer changes during the middle of a unwind inside a // finally clause, because: // 1. GC won't see the part of stack inside try (which has thrown an exception) that is already // unwinded and thus GC won't update GC pointers for this portion of the stack, but rather the // call stack in finally. // 2. upon return of finally, the unwind process continues and unwinds stack based on the part // of stack inside try and won't see the updated values in finally. // As a result, we need to manually update the context using register values upon return of finally // // Note that we only update the registers for finally clause because // 1. For filter handlers, stack walker is able to see the whole stack (including the try part) // with the help of ExceptionFilterFrame as filter handlers are called in first pass // 2. For catch handlers, the current unwinding is already finished // void EHContext::UpdateFrame(PREGDISPLAY regs) { LIMITED_METHOD_CONTRACT; // EAX ECX EDX are scratch. 
// No need to update ESP as unwinder takes care of that for us LOG((LF_EH, LL_INFO1000, "Updating saved EBX: *%p= %p\n", regs->pEbx, this->Ebx)); LOG((LF_EH, LL_INFO1000, "Updating saved ESI: *%p= %p\n", regs->pEsi, this->Esi)); LOG((LF_EH, LL_INFO1000, "Updating saved EDI: *%p= %p\n", regs->pEdi, this->Edi)); LOG((LF_EH, LL_INFO1000, "Updating saved EBP: *%p= %p\n", regs->pEbp, this->Ebp)); *regs->pEbx = this->Ebx; *regs->pEsi = this->Esi; *regs->pEdi = this->Edi; *regs->pEbp = this->Ebp; } #endif // FEATURE_EH_FUNCLETS void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); MethodDesc * pFunc = GetFunction(); _ASSERTE(pFunc != NULL); UpdateRegDisplayHelper(pRD, pFunc->CbStackPop()); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } void TransitionFrame::UpdateRegDisplayHelper(const PREGDISPLAY pRD, UINT cbStackPop) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; CalleeSavedRegisters* regs = GetCalleeSavedRegisters(); pRD->PCTAddr = GetReturnAddressPtr(); #ifdef FEATURE_EH_FUNCLETS DWORD CallerSP = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr);; pRD->pCurrentContext->Esp = CallerSP; UpdateRegDisplayFromCalleeSavedRegisters(pRD, regs); ClearRegDisplayArgumentAndScratchRegisters(pRD); SyncRegDisplayToCurrentContext(pRD); #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; #define CALLEE_SAVED_REGISTER(regname) pRD->p##regname = (DWORD*) &regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP = (DWORD)(pRD->PCTAddr + sizeof(TADDR) + cbStackPop); #endif // FEATURE_EH_FUNCLETS RETURN; } void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; PRECONDITION(m_MachState.isValid()); // InsureInit has been called SUPPORTS_DAC; } CONTRACT_END; ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState.GetRetAddr(), m_MachState.esp())); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. #ifdef DACCESS_COMPILE // For DAC, we may get here when the HMF is still uninitialized. // So we may need to unwind here. if (!m_MachState.isValid()) { // This allocation throws on OOM. 
MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true); InsureInit(false, pUnwoundState); pRD->PCTAddr = dac_cast<TADDR>(pUnwoundState->pRetAddr()); pRD->pCurrentContext->Eip = pRD->ControlPC = pUnwoundState->GetRetAddr(); pRD->pCurrentContext->Esp = pRD->SP = pUnwoundState->esp(); // Do not use pUnwoundState->p##regname() here because it returns NULL in this case pRD->pCurrentContext->Edi = pUnwoundState->_edi; pRD->pCurrentContext->Esi = pUnwoundState->_esi; pRD->pCurrentContext->Ebx = pUnwoundState->_ebx; pRD->pCurrentContext->Ebp = pUnwoundState->_ebp; #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = (DWORD*) pUnwoundState->p##regname(); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER ClearRegDisplayArgumentAndScratchRegisters(pRD); return; } #endif // DACCESS_COMPILE pRD->PCTAddr = dac_cast<TADDR>(m_MachState.pRetAddr()); pRD->pCurrentContext->Eip = pRD->ControlPC = m_MachState.GetRetAddr(); pRD->pCurrentContext->Esp = pRD->SP = (DWORD) m_MachState.esp(); #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = *((DWORD*) m_MachState.p##regname()); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = (DWORD*) m_MachState.p##regname(); ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER // // Clear all knowledge of scratch registers. We're skipping to any // arbitrary point on the stack, and frames aren't required to preserve or // keep track of these anyways. // ClearRegDisplayArgumentAndScratchRegisters(pRD); #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; #ifdef DACCESS_COMPILE // // In the dac case we may have gotten here // without the frame being initialized, so // try and initialize on the fly. // if (!m_MachState.isValid()) { MachState unwindState; InsureInit(false, &unwindState); pRD->PCTAddr = dac_cast<TADDR>(unwindState.pRetAddr()); pRD->ControlPC = unwindState.GetRetAddr(); pRD->SP = unwindState._esp; // Get some special host instance memory // so we have a place to point to. // This host memory has no target address // and so won't be looked up or used for // anything else. MachState* thisState = (MachState*) DacAllocHostOnlyInstance(sizeof(*thisState), true); thisState->_edi = unwindState._edi; pRD->pEdi = (DWORD *)&thisState->_edi; thisState->_esi = unwindState._esi; pRD->pEsi = (DWORD *)&thisState->_esi; thisState->_ebx = unwindState._ebx; pRD->pEbx = (DWORD *)&thisState->_ebx; thisState->_ebp = unwindState._ebp; pRD->pEbp = (DWORD *)&thisState->_ebp; // InsureInit always sets m_RegArgs to zero // in the real code. I'm not sure exactly // what should happen in the on-the-fly case, // but go with what would happen from an InsureInit. RETURN; } #endif // #ifdef DACCESS_COMPILE // DACCESS: The MachState pointers are kept as PTR_TADDR so // the host pointers here refer to the appropriate size and // these casts are not a problem. 
pRD->pEdi = (DWORD*) m_MachState.pEdi(); pRD->pEsi = (DWORD*) m_MachState.pEsi(); pRD->pEbx = (DWORD*) m_MachState.pEbx(); pRD->pEbp = (DWORD*) m_MachState.pEbp(); pRD->PCTAddr = dac_cast<TADDR>(m_MachState.pRetAddr()); pRD->ControlPC = m_MachState.GetRetAddr(); pRD->SP = (DWORD) m_MachState.esp(); #endif // FEATURE_EH_FUNCLETS RETURN; } #ifdef _DEBUG_IMPL // Confirm that if the machine state was not initialized, then // any unspilled callee saved registers did not change EXTERN_C MachState* STDCALL HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; DEBUG_ONLY; } CONTRACTL_END; MachState* state = frame->MachineState(); // if we've already executed this check once for this helper method frame then // we don't do the check again because it is very expensive. if (frame->HaveDoneConfirmStateCheck()) { return state; } // probe to avoid a kazillion violations in the code that follows. BEGIN_DEBUG_ONLY_CODE; if (!state->isValid()) { frame->InsureInit(false, NULL); _ASSERTE(state->_pEsi != &state->_esi || state->_esi == (TADDR)esiVal); _ASSERTE(state->_pEdi != &state->_edi || state->_edi == (TADDR)ediVal); _ASSERTE(state->_pEbx != &state->_ebx || state->_ebx == (TADDR)ebxVal); _ASSERTE(state->_pEbp != &state->_ebp || state->_ebp == (TADDR)ebpVal); } END_DEBUG_ONLY_CODE; // set that we have executed this check once for this helper method frame. frame->SetHaveDoneConfirmStateCheck(); return state; } #endif void ExternalMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; UpdateRegDisplayHelper(pRD, CbStackPopUsingGCRefMap(GetGCRefMap())); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK ExternalMethodFrane::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } void StubDispatchFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; PTR_BYTE pGCRefMap = GetGCRefMap(); if (pGCRefMap != NULL) { UpdateRegDisplayHelper(pRD, CbStackPopUsingGCRefMap(pGCRefMap)); } else if (GetFunction() != NULL) { FramedMethodFrame::UpdateRegDisplay(pRD); } else { UpdateRegDisplayHelper(pRD, 0); // If we do not have owning MethodDesc, we need to pretend that // the call happened on the call instruction to get the ESP unwound properly. 
// // This path is hit when we are throwing null reference exception from // code:VSD_ResolveWorker or code:StubDispatchFixupWorker pRD->ControlPC = GetAdjustedCallAddress(pRD->ControlPC); } LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK StubDispatchFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } PCODE StubDispatchFrame::GetReturnAddress() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PCODE retAddress = FramedMethodFrame::GetReturnAddress(); if (GetFunction() == NULL && GetGCRefMap() == NULL) { // See comment in code:StubDispatchFrame::UpdateRegDisplay retAddress = GetAdjustedCallAddress(retAddress); } return retAddress; } void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; pRD->PCTAddr = GetReturnAddressPtr(); #ifdef FEATURE_EH_FUNCLETS memcpy(pRD->pCurrentContext, &m_ctx, sizeof(CONTEXT)); pRD->SP = m_ctx.Esp; pRD->ControlPC = m_ctx.Eip; #define ARGUMENT_AND_SCRATCH_REGISTER(regname) pRD->pCurrentContextPointers->regname = &m_ctx.regname; ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = &m_ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; CalleeSavedRegisters* regs = GetCalleeSavedRegisters(); #define CALLEE_SAVED_REGISTER(regname) pRD->p##regname = (DWORD*) &regs->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->SP = m_Esp; pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); #endif // FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK FaultingExceptionFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; // We should skip over InlinedCallFrame if it is not active. // It will be part of a JITed method's frame, and the stack-walker // can handle such a case. #ifdef PROFILING_SUPPORTED PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this)); #endif HOST_NOCALLS; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; // @TODO: Remove this after the debugger is fixed to avoid stack-walks from bad places // @TODO: This may be still needed for sampling profilers if (!InlinedCallFrame::FrameHasActiveCall(this)) { LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this)); return; } DWORD stackArgSize = 0; #if !defined(UNIX_X86_ABI) stackArgSize = (DWORD) dac_cast<TADDR>(m_Datum); if (stackArgSize & ~0xFFFF) { NDirectMethodDesc * pMD = PTR_NDirectMethodDesc(m_Datum); /* if this is not an NDirect frame, something is really wrong */ _ASSERTE(pMD->SanityCheck() && pMD->IsNDirect()); stackArgSize = pMD->GetStackArgumentSize(); } #endif /* The return address is just above the "ESP" */ pRD->PCTAddr = PTR_HOST_MEMBER_TADDR(InlinedCallFrame, this, m_pCallerReturnAddress); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr); pRD->pCurrentContext->Esp = (DWORD) dac_cast<TADDR>(m_pCallSiteSP); pRD->pCurrentContext->Ebp = (DWORD) m_pCalleeSavedFP; ClearRegDisplayArgumentAndScratchRegisters(pRD); #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = NULL; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->pCurrentContextPointers->Ebp = (DWORD*) &m_pCalleeSavedFP; SyncRegDisplayToCurrentContext(pRD); #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; pRD->pEbp = (DWORD*) &m_pCalleeSavedFP; pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); /* Now we need to pop off the outgoing arguments */ pRD->SP = (DWORD) dac_cast<TADDR>(m_pCallSiteSP) + stackArgSize; #endif // FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK InlinedCallFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } #ifdef FEATURE_HIJACK //========================== // Resumable Exception Frame // TADDR ResumableFrame::GetReturnAddressPtr() { LIMITED_METHOD_DAC_CONTRACT; return dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Eip); } void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; pRD->PCTAddr = dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Eip); #ifdef FEATURE_EH_FUNCLETS CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT)); pRD->SP = m_Regs->Esp; pRD->ControlPC = m_Regs->Eip; #define ARGUMENT_AND_SCRATCH_REGISTER(reg) pRD->pCurrentContextPointers->reg = &m_Regs->reg; ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #define CALLEE_SAVED_REGISTER(reg) pRD->pCurrentContextPointers->reg = &m_Regs->reg; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. #else // FEATURE_EH_FUNCLETS // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; CONTEXT* pUnwoundContext = m_Regs; #if !defined(DACCESS_COMPILE) // "pContextForUnwind" field is only used on X86 since not only is it initialized just for it, // but its used only under the confines of STACKWALKER_MAY_POP_FRAMES preprocessor define, // which is defined for x86 only (refer to its definition in stackwalk.cpp). 
if (pRD->pContextForUnwind != NULL) { pUnwoundContext = pRD->pContextForUnwind; pUnwoundContext->Eax = m_Regs->Eax; pUnwoundContext->Ecx = m_Regs->Ecx; pUnwoundContext->Edx = m_Regs->Edx; pUnwoundContext->Edi = m_Regs->Edi; pUnwoundContext->Esi = m_Regs->Esi; pUnwoundContext->Ebx = m_Regs->Ebx; pUnwoundContext->Ebp = m_Regs->Ebp; pUnwoundContext->Eip = m_Regs->Eip; } #endif // !defined(DACCESS_COMPILE) pRD->pEax = &pUnwoundContext->Eax; pRD->pEcx = &pUnwoundContext->Ecx; pRD->pEdx = &pUnwoundContext->Edx; pRD->pEdi = &pUnwoundContext->Edi; pRD->pEsi = &pUnwoundContext->Esi; pRD->pEbx = &pUnwoundContext->Ebx; pRD->pEbp = &pUnwoundContext->Ebp; pRD->ControlPC = pUnwoundContext->Eip; pRD->SP = m_Regs->Esp; #endif // !FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK ResumableFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } // The HijackFrame has to know the registers that are pushed by OnHijackTripThread // -> HijackFrame::UpdateRegDisplay should restore all the registers pushed by OnHijackTripThread void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; pRD->PCTAddr = dac_cast<TADDR>(m_Args) + offsetof(HijackArgs, Eip); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr); pRD->pCurrentContext->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); #define RESTORE_REG(reg) { pRD->pCurrentContext->reg = m_Args->reg; pRD->pCurrentContextPointers->reg = &m_Args->reg; } #define CALLEE_SAVED_REGISTER(reg) RESTORE_REG(reg) ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define ARGUMENT_AND_SCRATCH_REGISTER(reg) RESTORE_REG(reg) ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #undef RESTORE_REG SyncRegDisplayToCurrentContext(pRD); #else // FEATURE_EH_FUNCLETS // This only describes the top-most frame pRD->pContext = NULL; #define RESTORE_REG(reg) { pRD->p##reg = &m_Args->reg; } #define CALLEE_SAVED_REGISTER(reg) RESTORE_REG(reg) ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER #define ARGUMENT_AND_SCRATCH_REGISTER(reg) RESTORE_REG(reg) ENUM_ARGUMENT_AND_SCRATCH_REGISTERS(); #undef ARGUMENT_AND_SCRATCH_REGISTER #undef RESTORE_REG pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); #endif // FEATURE_EH_FUNCLETS LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HijackFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); } #endif // FEATURE_HIJACK void PInvokeCalliFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; VASigCookie *pVASigCookie = GetVASigCookie(); UpdateRegDisplayHelper(pRD, pVASigCookie->sizeOfArgs+sizeof(int)); LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK PInvokeCalliFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } #ifndef UNIX_X86_ABI void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { CONTRACT_VOID { NOTHROW; GC_NOTRIGGER; MODE_ANY; HOST_NOCALLS; SUPPORTS_DAC; } CONTRACT_END; pRD->PCTAddr = GetReturnAddressPtr(); #ifdef FEATURE_EH_FUNCLETS pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. 
pRD->pCurrentContext->Eip = *PTR_PCODE(pRD->PCTAddr); pRD->pCurrentContext->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); UpdateRegDisplayFromCalleeSavedRegisters(pRD, &m_regs); ClearRegDisplayArgumentAndScratchRegisters(pRD); SyncRegDisplayToCurrentContext(pRD); #else // reset pContext; it's only valid for active (top-most) frame pRD->pContext = NULL; #define CALLEE_SAVED_REGISTER(regname) pRD->p##regname = (DWORD*) &m_regs.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP = (DWORD)(pRD->PCTAddr + sizeof(TADDR)); #endif LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TailCallFrame::UpdateRegDisplay(ip:%p, sp:%p)\n", pRD->ControlPC, pRD->SP)); RETURN; } #endif // !UNIX_X86_ABI #ifdef FEATURE_READYTORUN void DynamicHelperFrame::UpdateRegDisplay(const PREGDISPLAY pRD) { WRAPPER_NO_CONTRACT; UpdateRegDisplayHelper(pRD, 0); } #endif // FEATURE_READYTORUN //------------------------------------------------------------------------ // This is declared as returning WORD instead of PRD_TYPE because of // header issues with cgencpu.h including dbginterface.h. WORD GetUnpatchedCodeData(LPCBYTE pAddr) { #ifndef TARGET_X86 #error Make sure this works before porting to platforms other than x86. #endif CONTRACT(WORD) { NOTHROW; GC_NOTRIGGER; PRECONDITION(CORDebuggerAttached()); PRECONDITION(CheckPointer(pAddr)); } CONTRACT_END; // Ordering is because x86 is little-endien. BYTE bLow = pAddr[0]; BYTE bHigh = pAddr[1]; #ifndef DACCESS_COMPILE // Need to make sure that the code we're reading is free of breakpoint patches. PRD_TYPE unpatchedOpcode; if (g_pDebugInterface->CheckGetPatchedOpcode((CORDB_ADDRESS_TYPE *)pAddr, &unpatchedOpcode)) { // PRD_TYPE is supposed to be an opaque debugger structure representing data to remove a patch. // Although PRD_TYPE is currently typedef'ed to be a DWORD_PTR, it's actually semantically just a BYTE. // (since a patch on x86 is just an 0xCC instruction). // Ideally, the debugger subsystem would expose a patch-code stripper that returns BYTE/WORD/etc, and // not force us to crack it ourselves here. 
bLow = (BYTE) unpatchedOpcode; } // #endif WORD w = bLow + (bHigh << 8); RETURN w; } #ifndef DACCESS_COMPILE Stub *GenerateInitPInvokeFrameHelper() { CONTRACT(Stub*) { STANDARD_VM_CHECK; POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; CPUSTUBLINKER sl; CPUSTUBLINKER *psl = &sl; CORINFO_EE_INFO::InlinedCallFrameInfo FrameInfo; InlinedCallFrame::GetEEInfo(&FrameInfo); // EDI contains address of the frame on stack (the frame ptr, not its negspace) unsigned negSpace = FrameInfo.offsetOfFrameVptr; // mov esi, GetThread() psl->X86EmitCurrentThreadFetch(kESI, (1 << kEDI) | (1 << kEBX) | (1 << kECX) | (1 << kEDX)); // mov [edi + FrameInfo.offsetOfGSCookie], GetProcessGSCookie() psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfGSCookie - negSpace); psl->Emit32(GetProcessGSCookie()); // mov [edi + FrameInfo.offsetOfFrameVptr], InlinedCallFrame::GetFrameVtable() psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfFrameVptr - negSpace); psl->Emit32(InlinedCallFrame::GetMethodFrameVPtr()); // mov eax, [esi + offsetof(Thread, m_pFrame)] // mov [edi + FrameInfo.offsetOfFrameLink], eax psl->X86EmitIndexRegLoad(kEAX, kESI, offsetof(Thread, m_pFrame)); psl->X86EmitIndexRegStore(kEDI, FrameInfo.offsetOfFrameLink - negSpace, kEAX); // mov [edi + FrameInfo.offsetOfCalleeSavedEbp], ebp psl->X86EmitIndexRegStore(kEDI, FrameInfo.offsetOfCalleeSavedFP - negSpace, kEBP); // mov [edi + FrameInfo.offsetOfReturnAddress], 0 psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfReturnAddress - negSpace); psl->Emit32(0); // mov [esi + offsetof(Thread, m_pFrame)], edi psl->X86EmitIndexRegStore(kESI, offsetof(Thread, m_pFrame), kEDI); // leave current Thread in ESI psl->X86EmitReturn(0); // A single process-wide stub that will never unload RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()); } extern "C" VOID STDCALL StubRareEnableWorker(Thread *pThread) { WRAPPER_NO_CONTRACT; //printf("RareEnable\n"); pThread->RareEnablePreemptiveGC(); } // Disable when calling into managed code from a place that fails via Exceptions extern "C" VOID STDCALL StubRareDisableTHROWWorker(Thread *pThread) { STATIC_CONTRACT_THROWS; STATIC_CONTRACT_GC_TRIGGERS; // Do not add a CONTRACT here. We haven't set up SEH. // WARNING!!!! // when we start executing here, we are actually in cooperative mode. But we // haven't synchronized with the barrier to reentry yet. So we are in a highly // dangerous mode. If we call managed code, we will potentially be active in // the GC heap, even as GCs are occurring! // We must do the following in this order, because otherwise we would be constructing // the exception for the abort without synchronizing with the GC. Also, we have no // CLR SEH set up, despite the fact that we may throw a ThreadAbortException. pThread->RareDisablePreemptiveGC(); pThread->HandleThreadAbort(); } ////////////////////////////////////////////////////////////////////////////// // // JITInterface // ////////////////////////////////////////////////////////////////////////////// /*********************************************************************/ #ifdef EnC_SUPPORTED #pragma warning (disable : 4731) void ResumeAtJit(PCONTEXT pContext, LPVOID oldESP) { // No CONTRACT here, because we can't run the risk of it pushing any SEH into the // current method.
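// In outline (addresses are illustrative): the sequence below reserves one
// DWORD beneath the target ESP by decrementing pContext->Esp, stores the
// target EIP in that slot, restores the general-purpose registers, switches
// EBP and ESP, and lets the final `ret` pop the stored EIP. E.g. for
// targetESP 0x0012ff60 and targetEIP 0x00401000, the value 0x00401000 is
// written to 0x0012ff5c and the `ret` consumes it, leaving ESP at 0x0012ff60.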
STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; #ifdef _DEBUG DWORD curESP; __asm mov curESP, esp #endif if (oldESP) { _ASSERTE(curESP < (DWORD)(size_t)oldESP); // should have popped the SEH records by now as stack has been overwritten _ASSERTE(GetCurrentSEHRecord() > oldESP); } // For the "push Eip, ..., ret" _ASSERTE(curESP < pContext->Esp - sizeof(DWORD)); pContext->Esp -= sizeof(DWORD); __asm { mov ebp, pContext // Push Eip onto the targetESP, so that the final "ret" will consume it mov ecx, [ebp]CONTEXT.Esp mov edx, [ebp]CONTEXT.Eip mov [ecx], edx // Restore all registers except Esp, Ebp, Eip mov eax, [ebp]CONTEXT.Eax mov ebx, [ebp]CONTEXT.Ebx mov ecx, [ebp]CONTEXT.Ecx mov edx, [ebp]CONTEXT.Edx mov esi, [ebp]CONTEXT.Esi mov edi, [ebp]CONTEXT.Edi push [ebp]CONTEXT.Esp // pContext->Esp is (targetESP-sizeof(DWORD)) push [ebp]CONTEXT.Ebp pop ebp pop esp // esp is (targetESP-sizeof(DWORD)), and [esp] is the targetEIP. // The ret will set eip to targetEIP and esp will be automatically // incremented to targetESP ret } } #pragma warning (default : 4731) #endif // !EnC_SUPPORTED #ifndef TARGET_UNIX #pragma warning(push) #pragma warning(disable: 4035) extern "C" DWORD __stdcall xmmYmmStateSupport() { // No CONTRACT STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; __asm { mov ecx, 0 ; Specify xcr0 xgetbv ; result in EDX:EAX and eax, 06H cmp eax, 06H ; check OS has enabled both XMM and YMM state support jne not_supported mov eax, 1 jmp done not_supported: mov eax, 0 done: } } #pragma warning(pop) #else // !TARGET_UNIX void __cpuid(int cpuInfo[4], int function_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid" : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id) ); } void __cpuidex(int cpuInfo[4], int function_id, int subFunction_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid" : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id), "2"(subFunction_id) ); } extern "C" DWORD __stdcall xmmYmmStateSupport() { DWORD eax; __asm(" xgetbv\n" \ : "=a"(eax) /*output in eax*/\ : "c"(0) /*inputs - 0 in ecx*/\ : "edx" /* registers that are clobbered*/ ); // check OS has enabled both XMM and YMM state support return ((eax & 0x06) == 0x06) ? 
1 : 0; } #endif // !TARGET_UNIX void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam) { LIMITED_METHOD_CONTRACT; #ifdef _DEBUG m_alignpad[0] = X86_INSTR_INT3; m_alignpad[1] = X86_INSTR_INT3; #endif // _DEBUG m_movEAX = X86_INSTR_MOV_EAX_IMM32; m_uet = pvSecretParam; m_jmp = X86_INSTR_JMP_REL32; m_execstub = (BYTE*) ((pTargetCode) - (4+((BYTE*)&pEntryThunkCodeRX->m_execstub))); FlushInstructionCache(GetCurrentProcess(),pEntryThunkCodeRX->GetEntryPoint(),sizeof(UMEntryThunkCode)); } void UMEntryThunkCode::Poison() { LIMITED_METHOD_CONTRACT; ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode)); UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW(); pThisRW->m_execstub = (BYTE*) ((BYTE*)UMEntryThunk::ReportViolation - (4+((BYTE*)&m_execstub))); // mov ecx, imm32 pThisRW->m_movEAX = 0xb9; ClrFlushInstructionCache(GetEntryPoint(),sizeof(UMEntryThunkCode)); } UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback) { LIMITED_METHOD_CONTRACT; if (*((BYTE*)pCallback) != X86_INSTR_MOV_EAX_IMM32 || ( ((size_t)pCallback) & 3) != 2) { return NULL; } return *(UMEntryThunk**)( 1 + (BYTE*)pCallback ); } BOOL DoesSlotCallPrestub(PCODE pCode) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(pCode != NULL); PRECONDITION(pCode != GetPreStubEntryPoint()); } CONTRACTL_END; // x86 has the following possible sequences for prestub logic: // 1. slot -> temporary entrypoint -> prestub // 2. slot -> precode -> prestub // 3. slot -> precode -> jumprel32 (NGEN case) -> prestub #ifdef HAS_COMPACT_ENTRYPOINTS if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL) { return TRUE; } #endif // HAS_COMPACT_ENTRYPOINTS if (!IS_ALIGNED(pCode, PRECODE_ALIGNMENT)) { return FALSE; } #ifdef HAS_FIXUP_PRECODE if (*PTR_BYTE(pCode) == X86_INSTR_CALL_REL32) { // Note that call could have been patched to jmp in the meantime pCode = rel32Decode(pCode+1); // NGEN case if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) { pCode = rel32Decode(pCode+1); } return pCode == (TADDR)PrecodeFixupThunk; } #endif if (*PTR_BYTE(pCode) != X86_INSTR_MOV_EAX_IMM32 || *PTR_BYTE(pCode+5) != X86_INSTR_MOV_RM_R || *PTR_BYTE(pCode+7) != X86_INSTR_JMP_REL32) { return FALSE; } pCode = rel32Decode(pCode+8); // NGEN case if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) { pCode = rel32Decode(pCode+1); } return pCode == GetPreStubEntryPoint(); } #ifdef FEATURE_READYTORUN // // Allocation of dynamic helpers // #define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR) #define BEGIN_DYNAMIC_HELPER_EMIT(size) \ SIZE_T cb = size; \ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \ BYTE * pStart = startWriterHolder.GetRW(); \ size_t rxOffset = pStartRX - pStart; \ BYTE * p = pStart; #define END_DYNAMIC_HELPER_EMIT() \ _ASSERTE(pStart + cb == p); \ while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \ ClrFlushInstructionCache(pStartRX, cbAligned); \ return (PCODE)pStartRX PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { STANDARD_VM_CONTRACT; BEGIN_DYNAMIC_HELPER_EMIT(10); *p++ = 0xB9; // mov ecx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; END_DYNAMIC_HELPER_EMIT(); } void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t 
rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target) { CONTRACTL { GC_NOTRIGGER; PRECONDITION(p != NULL && target != NULL); } CONTRACTL_END; // Move an argument into the second argument register and jump to a target function. *p++ = 0xBA; // mov edx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; } PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(10); EmitHelperWithArg(p, rxOffset, pAllocator, arg, target); END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(15); *p++ = 0xB9; // mov ecx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = 0xBA; // mov edx, XXXXXX *(INT32 *)p = (INT32)arg2; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { BEGIN_DYNAMIC_HELPER_EMIT(12); *(UINT16 *)p = 0xD18B; // mov edx, ecx p += 2; *p++ = 0xB9; // mov ecx, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = X86_INSTR_JMP_REL32; // jmp rel32 *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator) { BEGIN_DYNAMIC_HELPER_EMIT(1); *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg) { BEGIN_DYNAMIC_HELPER_EMIT(6); *p++ = 0xB8; // mov eax, XXXXXX *(INT32 *)p = (INT32)arg; p += 4; *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset) { BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ?
9 : 6); *p++ = 0xA1; // mov eax, [XXXXXX] *(INT32 *)p = (INT32)arg; p += 4; if (offset != 0) { // add eax, <offset> *p++ = 0x83; *p++ = 0xC0; *p++ = offset; } *p++ = 0xC3; // ret END_DYNAMIC_HELPER_EMIT(); } EXTERN_C VOID DynamicHelperArgsStub(); PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target) { #ifdef UNIX_X86_ABI BEGIN_DYNAMIC_HELPER_EMIT(18); #else BEGIN_DYNAMIC_HELPER_EMIT(12); #endif #ifdef UNIX_X86_ABI // sub esp, 8 *p++ = 0x83; *p++ = 0xec; *p++ = 0x8; #else // pop eax *p++ = 0x58; #endif // push arg *p++ = 0x68; *(INT32 *)p = arg; p += 4; #ifdef UNIX_X86_ABI // mov eax, target *p++ = 0xB8; *(INT32 *)p = target; p += 4; #else // push eax *p++ = 0x50; #endif *p++ = X86_INSTR_JMP_REL32; // jmp rel32 #ifdef UNIX_X86_ABI *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), (PCODE)DynamicHelperArgsStub); #else *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); #endif p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target) { #ifdef UNIX_X86_ABI BEGIN_DYNAMIC_HELPER_EMIT(23); #else BEGIN_DYNAMIC_HELPER_EMIT(17); #endif #ifdef UNIX_X86_ABI // sub esp, 4 *p++ = 0x83; *p++ = 0xec; *p++ = 0x4; #else // pop eax *p++ = 0x58; #endif // push arg *p++ = 0x68; *(INT32 *)p = arg; p += 4; // push arg2 *p++ = 0x68; *(INT32 *)p = arg2; p += 4; #ifdef UNIX_X86_ABI // mov eax, target *p++ = 0xB8; *(INT32 *)p = target; p += 4; #else // push eax *p++ = 0x50; #endif *p++ = X86_INSTR_JMP_REL32; // jmp rel32 #ifdef UNIX_X86_ABI *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), (PCODE)DynamicHelperArgsStub); #else *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target); #endif p += 4; END_DYNAMIC_HELPER_EMIT(); } PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule) { STANDARD_VM_CONTRACT; PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ? GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) : GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule)); GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT); ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs)); argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot; argsWriterHolder.GetRW()->signature = pLookup->signature; argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule; WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*); // It's available only via the run-time helper function if (pLookup->indirections == CORINFO_USEHELPER) { BEGIN_DYNAMIC_HELPER_EMIT(10); // ecx contains the generic context parameter // mov edx,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); END_DYNAMIC_HELPER_EMIT(); } else { int indirectionsSize = 0; for (WORD i = 0; i < pLookup->indirections; i++) indirectionsSize += (pLookup->offsets[i] >= 0x80 ? 6 : 3); int codeSize = indirectionsSize + (pLookup->testForNull ? 15 : 1) + (pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK ? 
12 : 0); BEGIN_DYNAMIC_HELPER_EMIT(codeSize); BYTE* pJLECall = NULL; for (WORD i = 0; i < pLookup->indirections; i++) { if (i == pLookup->indirections - 1 && pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { _ASSERTE(pLookup->testForNull && i > 0); // cmp dword ptr[eax + sizeOffset],slotOffset *(UINT16*)p = 0xb881; p += 2; *(UINT32*)p = (UINT32)pLookup->sizeOffset; p += 4; *(UINT32*)p = (UINT32)slotOffset; p += 4; // jle 'HELPER CALL' *p++ = 0x7e; pJLECall = p++; // Offset filled later } // Move from ecx if it's the first indirection, otherwise from eax // mov eax,dword ptr [ecx|eax + offset] if (pLookup->offsets[i] >= 0x80) { *(UINT16*)p = (i == 0 ? 0x818b : 0x808b); p += 2; *(UINT32*)p = (UINT32)pLookup->offsets[i]; p += 4; } else { *(UINT16*)p = (i == 0 ? 0x418b : 0x408b); p += 2; *p++ = (BYTE)pLookup->offsets[i]; } } // No null test required if (!pLookup->testForNull) { _ASSERTE(pLookup->sizeOffset == CORINFO_NO_SIZE_CHECK); // No fixups needed for R2R *p++ = 0xC3; // ret } else { // ecx contains the value of the dictionary slot entry _ASSERTE(pLookup->indirections != 0); // test eax,eax *(UINT16*)p = 0xc085; p += 2; // je 'HELPER_CALL' (a jump of 1 byte) *(UINT16*)p = 0x0174; p += 2; *p++ = 0xC3; // ret // 'HELPER_CALL' { if (pJLECall != NULL) *pJLECall = (BYTE)(p - pJLECall - 1); // ecx already contains the generic context parameter // mov edx,pArgs // jmp helperAddress EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress); } } END_DYNAMIC_HELPER_EMIT(); } } #endif // FEATURE_READYTORUN #endif // DACCESS_COMPILE
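As a reading aid, the sketch below (standalone and hypothetical, not part of the runtime; DecodedHelper and DecodeCreateHelper are invented names) decodes the 10-byte "mov ecx, imm32 / jmp rel32" stub emitted by DynamicHelpers::CreateHelper above; it assumes the exact B9/E9 layout and ignores any jump-stub indirection introduced by rel32UsingJumpStub.

#include <cstdint>
#include <cstring>

struct DecodedHelper
{
    uint32_t  arg;     // the imm32 loaded into ecx
    uintptr_t target;  // the final jmp destination
};

static bool DecodeCreateHelper(const uint8_t* p, DecodedHelper* out)
{
    if (p[0] != 0xB9 || p[5] != 0xE9)   // mov ecx, imm32 ; jmp rel32
        return false;

    std::memcpy(&out->arg, p + 1, sizeof(uint32_t));

    int32_t rel32;
    std::memcpy(&rel32, p + 6, sizeof(int32_t));

    // rel32 is relative to the end of the 5-byte jmp, i.e. p + 10.
    out->target = (uintptr_t)(p + 10) + (intptr_t)rel32;
    return true;
}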
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/tests/palsuite/c_runtime/atan2f/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests that atan2f returns correct values for a subset of values. ** Tests with positive and negative values of x and y to ensure ** atan2f is returning results from the correct quadrant. ** **===================================================================*/ #include <palsuite.h> // binary32 (float) has a machine epsilon of 2^-23 (approx. 1.19e-07). However, this // is slightly too accurate when writing tests meant to run against libm implementations // for various platforms. 2^-21 (approx. 4.76e-07) seems to be as accurate as we can get. // // The tests themselves will take PAL_EPSILON and adjust it according to the expected result // so that the delta used for comparison will compare the most significant digits and ignore // any digits that are outside the double precision range (6-9 digits). // For example, a test with an expect result in the format of 0.xxxxxxxxx will use PAL_EPSILON // for the variance, while an expected result in the format of 0.0xxxxxxxxx will use // PAL_EPSILON / 10 and and expected result in the format of x.xxxxxx will use PAL_EPSILON * 10. #define PAL_EPSILON 4.76837158e-07 #define PAL_NAN sqrtf(-1.0f) #define PAL_POSINF -logf(0.0f) #define PAL_NEGINF logf(0.0f) struct test { float y; /* second component of the value to test the function with */ float x; /* first component of the value to test the function with */ float expected; /* expected result */ float variance; /* maximum delta between the expected and actual result */ }; /** * atan2f_test1_validate * * test validation function */ void __cdecl atan2f_test1_validate(float y, float x, float expected, float variance) { float result = atan2f(y, x); /* * The test is valid when the difference between result * and expected is less than or equal to variance */ float delta = fabsf(result - expected); if (delta > variance) { Fail("atan2f(%g, %g) returned %10.9g when it should have returned %10.9g", y, x, result, expected); } } /** * atan2f_test1_validate * * test validation function for values returning NaN */ void __cdecl atan2f_test1_validate_isnan(float y, float x) { float result = atan2f(y, x); if (!_isnanf(result)) { Fail("atan2f(%g, %g) returned %10.9g when it should have returned %10.9g", y, x, result, PAL_NAN); } } /** * main * * executable entry point */ PALTEST(c_runtime_atan2f_test1_paltest_atan2f_test1, "c_runtime/atan2f/test1/paltest_atan2f_test1") { struct test tests[] = { /* y x expected variance */ { 0, PAL_POSINF, 0, PAL_EPSILON }, { 0, 0, 0, PAL_EPSILON }, { 0.312961796f, 0.949765715f, 0.318309886f, PAL_EPSILON }, // expected: 1 / pi { 0.420770483f, 0.907167129f, 0.434294482f, PAL_EPSILON }, // expected: logf10f(e) { 0.594480769f, 0.804109828f, 0.636619772f, PAL_EPSILON }, // expected: 2 / pi { 0.638961276f, 0.769238901f, 0.693147181f, PAL_EPSILON }, // expected: ln(2) { 0.649636939f, 0.760244597f, 0.707106781f, PAL_EPSILON }, // expected: 1 / sqrtf(2) { 0.707106781f, 0.707106781f, 0.785398163f, PAL_EPSILON }, // expected: pi / 4, value: 1 / sqrtf(2) { 1, 1, 0.785398163f, PAL_EPSILON }, // expected: pi / 4 { PAL_POSINF, PAL_POSINF, 0.785398163f, PAL_EPSILON }, // expected: pi / 4 { 0.841470985f, 0.540302306f, 1, PAL_EPSILON * 10 }, { 0.903719457f, 0.428125148f, 1.12837917f, PAL_EPSILON * 10 }, // expected: 2 / sqrtf(pi) { 0.987765946f, 0.155943695f, 
1.41421356f, PAL_EPSILON * 10 }, // expected: sqrtf(2) { 0.991806244f, 0.127751218f, 1.44269504f, PAL_EPSILON * 10 }, // expected: logf2(e) { 1, 0, 1.57079633f, PAL_EPSILON * 10 }, // expected: pi / 2 { PAL_POSINF, 0, 1.57079633f, PAL_EPSILON * 10 }, // expected: pi / 2 { PAL_POSINF, 1, 1.57079633f, PAL_EPSILON * 10 }, // expected: pi / 2 { 0.743980337f, -0.668201510f, 2.30258509f, PAL_EPSILON * 10 }, // expected: ln(10) { 0.410781291f, -0.911733915f, 2.71828183f, PAL_EPSILON * 10 }, // expected: e { 0, -1, 3.14159265f, PAL_EPSILON * 10 }, // expected: pi { 1, PAL_POSINF, 0, PAL_EPSILON }, }; if (PAL_Initialize(argc, argv) != 0) { return FAIL; } for (int i = 0; i < (sizeof(tests) / sizeof(struct test)); i++) { const float pi = 3.14159265f; atan2f_test1_validate( tests[i].y, tests[i].x, tests[i].expected, tests[i].variance); atan2f_test1_validate(-tests[i].y, tests[i].x, -tests[i].expected, tests[i].variance); atan2f_test1_validate( tests[i].y, -tests[i].x, pi - tests[i].expected, tests[i].variance); atan2f_test1_validate(-tests[i].y, -tests[i].x, tests[i].expected - pi, tests[i].variance); } atan2f_test1_validate_isnan(PAL_NEGINF, PAL_NAN); atan2f_test1_validate_isnan(PAL_NAN, PAL_NEGINF); atan2f_test1_validate_isnan(PAL_NAN, PAL_POSINF); atan2f_test1_validate_isnan(PAL_POSINF, PAL_NAN); atan2f_test1_validate_isnan(PAL_NAN, -1); atan2f_test1_validate_isnan(PAL_NAN, -0.0f); atan2f_test1_validate_isnan(PAL_NAN, 0); atan2f_test1_validate_isnan(PAL_NAN, 1); atan2f_test1_validate_isnan(-1, PAL_NAN); atan2f_test1_validate_isnan(-0.0f, PAL_NAN); atan2f_test1_validate_isnan( 0, PAL_NAN); atan2f_test1_validate_isnan( 1, PAL_NAN); atan2f_test1_validate_isnan(PAL_NAN, PAL_NAN); PAL_Terminate(); return PASS; }
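The test loop above relies on the quadrant symmetries of atan2; a minimal self-contained sketch (hypothetical driver, host libm assumed) of those identities for an arbitrary first-quadrant point:

#include <math.h>
#include <stdio.h>

int main()
{
    const float pi = 3.14159265f;
    const float y = 0.5f, x = 0.25f;    // arbitrary first-quadrant inputs
    const float q1 = atan2f(y, x);

    printf("%f == %f\n", -q1,     atan2f(-y,  x));  // quadrant IV: -atan2(y, x)
    printf("%f == %f\n", pi - q1, atan2f( y, -x));  // quadrant II: pi - atan2(y, x)
    printf("%f == %f\n", q1 - pi, atan2f(-y, -x));  // quadrant III: atan2(y, x) - pi
    return 0;
}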
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests that atan2f returns correct values for a subset of values. ** Tests with positive and negative values of x and y to ensure ** atan2f is returning results from the correct quadrant. ** **===================================================================*/ #include <palsuite.h> // binary32 (float) has a machine epsilon of 2^-23 (approx. 1.19e-07). However, this // is slightly too accurate when writing tests meant to run against libm implementations // for various platforms. 2^-21 (approx. 4.76e-07) seems to be as accurate as we can get. // // The tests themselves will take PAL_EPSILON and adjust it according to the expected result // so that the delta used for comparison will compare the most significant digits and ignore // any digits that are outside the double precision range (6-9 digits). // For example, a test with an expect result in the format of 0.xxxxxxxxx will use PAL_EPSILON // for the variance, while an expected result in the format of 0.0xxxxxxxxx will use // PAL_EPSILON / 10 and and expected result in the format of x.xxxxxx will use PAL_EPSILON * 10. #define PAL_EPSILON 4.76837158e-07 #define PAL_NAN sqrtf(-1.0f) #define PAL_POSINF -logf(0.0f) #define PAL_NEGINF logf(0.0f) struct test { float y; /* second component of the value to test the function with */ float x; /* first component of the value to test the function with */ float expected; /* expected result */ float variance; /* maximum delta between the expected and actual result */ }; /** * atan2f_test1_validate * * test validation function */ void __cdecl atan2f_test1_validate(float y, float x, float expected, float variance) { float result = atan2f(y, x); /* * The test is valid when the difference between result * and expected is less than or equal to variance */ float delta = fabsf(result - expected); if (delta > variance) { Fail("atan2f(%g, %g) returned %10.9g when it should have returned %10.9g", y, x, result, expected); } } /** * atan2f_test1_validate * * test validation function for values returning NaN */ void __cdecl atan2f_test1_validate_isnan(float y, float x) { float result = atan2f(y, x); if (!_isnanf(result)) { Fail("atan2f(%g, %g) returned %10.9g when it should have returned %10.9g", y, x, result, PAL_NAN); } } /** * main * * executable entry point */ PALTEST(c_runtime_atan2f_test1_paltest_atan2f_test1, "c_runtime/atan2f/test1/paltest_atan2f_test1") { struct test tests[] = { /* y x expected variance */ { 0, PAL_POSINF, 0, PAL_EPSILON }, { 0, 0, 0, PAL_EPSILON }, { 0.312961796f, 0.949765715f, 0.318309886f, PAL_EPSILON }, // expected: 1 / pi { 0.420770483f, 0.907167129f, 0.434294482f, PAL_EPSILON }, // expected: logf10f(e) { 0.594480769f, 0.804109828f, 0.636619772f, PAL_EPSILON }, // expected: 2 / pi { 0.638961276f, 0.769238901f, 0.693147181f, PAL_EPSILON }, // expected: ln(2) { 0.649636939f, 0.760244597f, 0.707106781f, PAL_EPSILON }, // expected: 1 / sqrtf(2) { 0.707106781f, 0.707106781f, 0.785398163f, PAL_EPSILON }, // expected: pi / 4, value: 1 / sqrtf(2) { 1, 1, 0.785398163f, PAL_EPSILON }, // expected: pi / 4 { PAL_POSINF, PAL_POSINF, 0.785398163f, PAL_EPSILON }, // expected: pi / 4 { 0.841470985f, 0.540302306f, 1, PAL_EPSILON * 10 }, { 0.903719457f, 0.428125148f, 1.12837917f, PAL_EPSILON * 10 }, // expected: 2 / sqrtf(pi) { 0.987765946f, 0.155943695f, 
1.41421356f, PAL_EPSILON * 10 }, // expected: sqrtf(2) { 0.991806244f, 0.127751218f, 1.44269504f, PAL_EPSILON * 10 }, // expected: logf2(e) { 1, 0, 1.57079633f, PAL_EPSILON * 10 }, // expected: pi / 2 { PAL_POSINF, 0, 1.57079633f, PAL_EPSILON * 10 }, // expected: pi / 2 { PAL_POSINF, 1, 1.57079633f, PAL_EPSILON * 10 }, // expected: pi / 2 { 0.743980337f, -0.668201510f, 2.30258509f, PAL_EPSILON * 10 }, // expected: ln(10) { 0.410781291f, -0.911733915f, 2.71828183f, PAL_EPSILON * 10 }, // expected: e { 0, -1, 3.14159265f, PAL_EPSILON * 10 }, // expected: pi { 1, PAL_POSINF, 0, PAL_EPSILON }, }; if (PAL_Initialize(argc, argv) != 0) { return FAIL; } for (int i = 0; i < (sizeof(tests) / sizeof(struct test)); i++) { const float pi = 3.14159265f; atan2f_test1_validate( tests[i].y, tests[i].x, tests[i].expected, tests[i].variance); atan2f_test1_validate(-tests[i].y, tests[i].x, -tests[i].expected, tests[i].variance); atan2f_test1_validate( tests[i].y, -tests[i].x, pi - tests[i].expected, tests[i].variance); atan2f_test1_validate(-tests[i].y, -tests[i].x, tests[i].expected - pi, tests[i].variance); } atan2f_test1_validate_isnan(PAL_NEGINF, PAL_NAN); atan2f_test1_validate_isnan(PAL_NAN, PAL_NEGINF); atan2f_test1_validate_isnan(PAL_NAN, PAL_POSINF); atan2f_test1_validate_isnan(PAL_POSINF, PAL_NAN); atan2f_test1_validate_isnan(PAL_NAN, -1); atan2f_test1_validate_isnan(PAL_NAN, -0.0f); atan2f_test1_validate_isnan(PAL_NAN, 0); atan2f_test1_validate_isnan(PAL_NAN, 1); atan2f_test1_validate_isnan(-1, PAL_NAN); atan2f_test1_validate_isnan(-0.0f, PAL_NAN); atan2f_test1_validate_isnan( 0, PAL_NAN); atan2f_test1_validate_isnan( 1, PAL_NAN); atan2f_test1_validate_isnan(PAL_NAN, PAL_NAN); PAL_Terminate(); return PASS; }
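The variance-scaling convention from the header comment can also be seen in isolation; WithinVariance below is a hypothetical helper that mirrors the comparison inside atan2f_test1_validate.

#include <math.h>

static bool WithinVariance(float result, float expected, float variance)
{
    // A result passes when its delta from the expected value stays within
    // the caller-supplied variance, matching the Fail check in the test.
    return fabsf(result - expected) <= variance;
}

// E.g. an expected value of 0.318309886f (format 0.xxxxxxxxx) is checked with
// PAL_EPSILON, while 1.41421356f (format x.xxxxxxxx) is checked with
// PAL_EPSILON * 10, so roughly the same significant digits are compared.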
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/debug/shim/debugshim.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // debugshim.cpp // // //***************************************************************************** #include "debugshim.h" #include "dbgutil.h" #include <crtdbg.h> #include <clrinternal.h> //has the CLR_ID_V4_DESKTOP guid in it #include "palclr.h" //***************************************************************************** // CLRDebuggingImpl implementation (ICLRDebugging) //***************************************************************************** typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcessImpl2FnPtr)(ULONG64 clrInstanceId, IUnknown * pDataTarget, LPCWSTR pDacModulePath, CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion, REFIID riid, IUnknown ** ppInstance, CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags); typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcessImplFnPtr)(ULONG64 clrInstanceId, IUnknown * pDataTarget, HMODULE hDacDll, CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion, REFIID riid, IUnknown ** ppInstance, CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags); typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcess2FnPtr)(ULONG64 clrInstanceId, IUnknown * pDataTarget, HMODULE hDacDll, REFIID riid, IUnknown ** ppInstance, CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags); typedef HMODULE (STDAPICALLTYPE *LoadLibraryWFnPtr)(LPCWSTR lpLibFileName); static bool IsTargetWindows(ICorDebugDataTarget* pDataTarget) { CorDebugPlatform targetPlatform; HRESULT result = pDataTarget->GetPlatform(&targetPlatform); if(FAILED(result)) { _ASSERTE(!"Unexpected error"); return false; } switch (targetPlatform) { case CORDB_PLATFORM_WINDOWS_X86: case CORDB_PLATFORM_WINDOWS_AMD64: case CORDB_PLATFORM_WINDOWS_IA64: case CORDB_PLATFORM_WINDOWS_ARM: case CORDB_PLATFORM_WINDOWS_ARM64: return true; default: return false; } } // Implementation of ICLRDebugging::OpenVirtualProcess // // Arguments: // moduleBaseAddress - the address of the module which might be a CLR // pDataTarget - the data target for inspecting the process // pLibraryProvider - a callback for locating DBI and DAC // pMaxDebuggerSupportedVersion - the max version of the CLR that this debugger will support debugging // riidProcess - the IID of the interface that should be passed back in ppProcess // ppProcess - output for the ICorDebugProcess# if this module is a CLR // pVersion - the CLR version if this module is a CLR // pFlags - output, see the CLR_DEBUGGING_PROCESS_FLAGS for more details. 
Right now this has only one possible // value which indicates this runtime had an unhandled exception STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess( ULONG64 moduleBaseAddress, IUnknown * pDataTarget, ICLRDebuggingLibraryProvider * pLibraryProvider, CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion, REFIID riidProcess, IUnknown ** ppProcess, CLR_DEBUGGING_VERSION * pVersion, CLR_DEBUGGING_PROCESS_FLAGS * pFlags) { //PRECONDITION(CheckPointer(pDataTarget)); HRESULT hr = S_OK; ICorDebugDataTarget * pDt = NULL; HMODULE hDbi = NULL; HMODULE hDac = NULL; LPWSTR pDacModulePath = NULL; LPWSTR pDbiModulePath = NULL; DWORD dbiTimestamp; DWORD dbiSizeOfImage; WCHAR dbiName[MAX_PATH_FNAME] = { 0 }; DWORD dacTimestamp; DWORD dacSizeOfImage; WCHAR dacName[MAX_PATH_FNAME] = { 0 }; CLR_DEBUGGING_VERSION version; BOOL versionSupportedByCaller = FALSE; // argument checking if ((ppProcess != NULL || pFlags != NULL) && pLibraryProvider == NULL) { hr = E_POINTER; // the library provider must be specified if either // ppProcess or pFlags is non-NULL } else if ((ppProcess != NULL || pFlags != NULL) && pMaxDebuggerSupportedVersion == NULL) { hr = E_POINTER; // the max supported version must be specified if either // ppProcess or pFlags is non-NULL } else if (pVersion != NULL && pVersion->wStructVersion != 0) { hr = CORDBG_E_UNSUPPORTED_VERSION_STRUCT; } else if (FAILED(pDataTarget->QueryInterface(__uuidof(ICorDebugDataTarget), (void**)&pDt))) { hr = CORDBG_E_MISSING_DATA_TARGET_INTERFACE; } if (SUCCEEDED(hr)) { // get CLR version // The expectation is that new versions of the CLR will continue to use the same GUID // (unless there's a reason to hide them from older shims), but debuggers will tell us the // CLR version they're designed for and mscordbi.dll can decide whether or not to accept it. 
version.wStructVersion = 0; hr = GetCLRInfo(pDt, moduleBaseAddress, &version, &dbiTimestamp, &dbiSizeOfImage, dbiName, MAX_PATH_FNAME, &dacTimestamp, &dacSizeOfImage, dacName, MAX_PATH_FNAME); } // If we need to fetch either the process info or the flags info then we need to find // mscordbi and DAC and do the version specific OVP work if (SUCCEEDED(hr) && (ppProcess != NULL || pFlags != NULL)) { ICLRDebuggingLibraryProvider2* pLibraryProvider2; if (SUCCEEDED(pLibraryProvider->QueryInterface(__uuidof(ICLRDebuggingLibraryProvider2), (void**)&pLibraryProvider2))) { if (FAILED(pLibraryProvider2->ProvideLibrary2(dbiName, dbiTimestamp, dbiSizeOfImage, &pDbiModulePath)) || pDbiModulePath == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } if (SUCCEEDED(hr)) { hDbi = LoadLibraryW(pDbiModulePath); if (hDbi == NULL) { hr = HRESULT_FROM_WIN32(GetLastError()); } } if (SUCCEEDED(hr)) { // Adjust the timestamp and size of image if this DAC is a known buggy version and needs to be retargeted RetargetDacIfNeeded(&dacTimestamp, &dacSizeOfImage); // Ask library provider for dac if (FAILED(pLibraryProvider2->ProvideLibrary2(dacName, dacTimestamp, dacSizeOfImage, &pDacModulePath)) || pDacModulePath == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } if (SUCCEEDED(hr)) { hDac = LoadLibraryW(pDacModulePath); if (hDac == NULL) { hr = HRESULT_FROM_WIN32(GetLastError()); } } } pLibraryProvider2->Release(); } else { // Ask library provider for dbi if (FAILED(pLibraryProvider->ProvideLibrary(dbiName, dbiTimestamp, dbiSizeOfImage, &hDbi)) || hDbi == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } if (SUCCEEDED(hr)) { // Adjust the timestamp and size of image if this DAC is a known buggy version and needs to be retargeted RetargetDacIfNeeded(&dacTimestamp, &dacSizeOfImage); // ask library provider for dac if (FAILED(pLibraryProvider->ProvideLibrary(dacName, dacTimestamp, dacSizeOfImage, &hDac)) || hDac == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } } } *ppProcess = NULL; if (SUCCEEDED(hr) && pDacModulePath != NULL) { // Get access to the latest OVP implementation and call it OpenVirtualProcessImpl2FnPtr ovpFn = (OpenVirtualProcessImpl2FnPtr)GetProcAddress(hDbi, "OpenVirtualProcessImpl2"); if (ovpFn != NULL) { hr = ovpFn(moduleBaseAddress, pDataTarget, pDacModulePath, pMaxDebuggerSupportedVersion, riidProcess, ppProcess, pFlags); if (FAILED(hr)) { _ASSERTE(ppProcess == NULL || *ppProcess == NULL); _ASSERTE(pFlags == NULL || *pFlags == 0); } } #ifdef HOST_UNIX else { // On Linux/MacOS the DAC module handle needs to be re-created using the DAC PAL instance // before being passed to DBI's OpenVirtualProcess* implementation. The DBI and DAC share // the same PAL where dbgshim has it's own. LoadLibraryWFnPtr loadLibraryWFn = (LoadLibraryWFnPtr)GetProcAddress(hDac, "LoadLibraryW"); if (loadLibraryWFn != NULL) { hDac = loadLibraryWFn(pDacModulePath); if (hDac == NULL) { hr = E_HANDLE; } } else { hr = E_HANDLE; } } #endif // HOST_UNIX } // If no errors so far and "OpenVirtualProcessImpl2" doesn't exist if (SUCCEEDED(hr) && *ppProcess == NULL) { // Get access to OVP and call it OpenVirtualProcessImplFnPtr ovpFn = (OpenVirtualProcessImplFnPtr)GetProcAddress(hDbi, "OpenVirtualProcessImpl"); if (ovpFn == NULL) { // Fallback to CLR v4 Beta1 path, but skip some of the checking we'd normally do (maxSupportedVersion, etc.) 
OpenVirtualProcess2FnPtr ovp2Fn = (OpenVirtualProcess2FnPtr)GetProcAddress(hDbi, "OpenVirtualProcess2"); if (ovp2Fn == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } else { hr = ovp2Fn(moduleBaseAddress, pDataTarget, hDac, riidProcess, ppProcess, pFlags); } } else { // Have a CLR v4 Beta2+ DBI, call it and let it do the version check hr = ovpFn(moduleBaseAddress, pDataTarget, hDac, pMaxDebuggerSupportedVersion, riidProcess, ppProcess, pFlags); if (FAILED(hr)) { _ASSERTE(ppProcess == NULL || *ppProcess == NULL); _ASSERTE(pFlags == NULL || *pFlags == 0); } } } } //version is still valid in some failure cases if (pVersion != NULL && (SUCCEEDED(hr) || (hr == CORDBG_E_UNSUPPORTED_DEBUGGING_MODEL) || (hr == CORDBG_E_UNSUPPORTED_FORWARD_COMPAT))) { memcpy(pVersion, &version, sizeof(CLR_DEBUGGING_VERSION)); } if (pDacModulePath != NULL) { #ifdef HOST_UNIX free(pDacModulePath); #else CoTaskMemFree(pDacModulePath); #endif } if (pDbiModulePath != NULL) { #ifdef HOST_UNIX free(pDbiModulePath); #else CoTaskMemFree(pDbiModulePath); #endif } // free the data target we QI'ed earlier if (pDt != NULL) { pDt->Release(); } return hr; } // Checks to see if this DAC is one of a known set of old DAC builds which contains an issue. // If so we retarget to a newer compatible version which has the bug fixed. This is done // by changing the PE information used to lookup the DAC. // // Arguments // pdwTimeStamp - on input, the timestamp of DAC as embedded in the CLR image // on output, a potentially new timestamp for an updated DAC to use // instead // pdwSizeOfImage - on input, the sizeOfImage of DAC as embedded in the CLR image // on output, a potentially new sizeOfImage for an updated DAC to use // instead VOID CLRDebuggingImpl::RetargetDacIfNeeded(DWORD* pdwTimeStamp, DWORD* pdwSizeOfImage) { // This code is auto generated by the CreateRetargetTable tool // on 3/4/2011 6:35 PM // and then copy-pasted here. // // // // Retarget the GDR1 amd64 build if( (*pdwTimeStamp == 0x4d536868) && (*pdwSizeOfImage == 0x17b000)) { *pdwTimeStamp = 0x4d71a160; *pdwSizeOfImage = 0x17b000; } // Retarget the GDR1 x86 build else if( (*pdwTimeStamp == 0x4d5368f2) && (*pdwSizeOfImage == 0x120000)) { *pdwTimeStamp = 0x4d71a14f; *pdwSizeOfImage = 0x120000; } // Retarget the RTM amd64 build else if( (*pdwTimeStamp == 0x4ba21fa7) && (*pdwSizeOfImage == 0x17b000)) { *pdwTimeStamp = 0x4d71a13c; *pdwSizeOfImage = 0x17b000; } // Retarget the RTM x86 build else if( (*pdwTimeStamp == 0x4ba1da25) && (*pdwSizeOfImage == 0x120000)) { *pdwTimeStamp = 0x4d71a128; *pdwSizeOfImage = 0x120000; } // This code is auto generated by the CreateRetargetTable tool // on 8/17/2011 1:28 AM // and then copy-pasted here. 
// // // // Retarget the GDR2 amd64 build else if( (*pdwTimeStamp == 0x4da428c7) && (*pdwSizeOfImage == 0x17b000)) { *pdwTimeStamp = 0x4e4b7bc2; *pdwSizeOfImage = 0x17b000; } // Retarget the GDR2 x86 build else if( (*pdwTimeStamp == 0x4da3fe52) && (*pdwSizeOfImage == 0x120000)) { *pdwTimeStamp = 0x4e4b7bb1; *pdwSizeOfImage = 0x120000; } // End auto-generated code } #define PE_FIXEDFILEINFO_SIGNATURE 0xFEEF04BD // The format of the special debugging resource we embed in CLRs starting in // v4 struct CLR_DEBUG_RESOURCE { DWORD dwVersion; GUID signature; DWORD dwDacTimeStamp; DWORD dwDacSizeOfImage; DWORD dwDbiTimeStamp; DWORD dwDbiSizeOfImage; }; // Checks to see if a module is a CLR and if so, fetches the debug data // from the embedded resource // // Arguments // pDataTarget - dataTarget for the process we are inspecting // moduleBaseAddress - base address of a module we should inspect // pVersion - output, the version of the CLR detected if this is a CLR // pdwDbiTimeStamp - the timestamp of DBI as embedded in the CLR image // pdwDbiSizeOfImage - the SizeOfImage of DBI as embedded in the CLR image // pDbiName - output, the filename of DBI (as calculated by this function but that might change) // dwDbiNameCharCount - input, the number of WCHARs in the buffer pointed to by pDbiName // pdwDacTimeStampe - the timestamp of DAC as embedded in the CLR image // pdwDacSizeOfImage - the SizeOfImage of DAC as embedded in the CLR image // pDacName - output, the filename of DAC (as calculated by this function but that might change) // dwDacNameCharCount - input, the number of WCHARs in the buffer pointed to by pDacName HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget, ULONG64 moduleBaseAddress, CLR_DEBUGGING_VERSION* pVersion, DWORD* pdwDbiTimeStamp, DWORD* pdwDbiSizeOfImage, _Inout_updates_z_(dwDbiNameCharCount) WCHAR* pDbiName, DWORD dwDbiNameCharCount, DWORD* pdwDacTimeStamp, DWORD* pdwDacSizeOfImage, _Inout_updates_z_(dwDacNameCharCount) WCHAR* pDacName, DWORD dwDacNameCharCount) { #ifdef HOST_WINDOWS if(IsTargetWindows(pDataTarget)) { WORD imageFileMachine = 0; DWORD resourceSectionRVA = 0; HRESULT hr = GetMachineAndResourceSectionRVA(pDataTarget, moduleBaseAddress, &imageFileMachine, &resourceSectionRVA); // We want the version resource which has type = RT_VERSION = 16, name = 1, language = 0x409 DWORD versionResourceRVA = 0; DWORD versionResourceSize = 0; if(SUCCEEDED(hr)) { hr = GetResourceRvaFromResourceSectionRva(pDataTarget, moduleBaseAddress, resourceSectionRVA, 16, 1, 0x409, &versionResourceRVA, &versionResourceSize); } // At last we get our version info VS_FIXEDFILEINFO fixedFileInfo = {0}; if(SUCCEEDED(hr)) { // The version resource has 3 words, then the unicode string "VS_VERSION_INFO" // (16 WCHARS including the null terminator) // then padding to a 32-bit boundary, then the VS_FIXEDFILEINFO struct DWORD fixedFileInfoRVA = ((versionResourceRVA + 3*2 + 16*2 + 3)/4)*4; hr = ReadFromDataTarget(pDataTarget, moduleBaseAddress + fixedFileInfoRVA, (BYTE*)&fixedFileInfo, sizeof(fixedFileInfo)); } //Verify the signature on the version resource if(SUCCEEDED(hr) && fixedFileInfo.dwSignature != PE_FIXEDFILEINFO_SIGNATURE) { hr = CORDBG_E_NOT_CLR; } // Record the version information if(SUCCEEDED(hr)) { pVersion->wMajor = (WORD) (fixedFileInfo.dwProductVersionMS >> 16); pVersion->wMinor = (WORD) (fixedFileInfo.dwProductVersionMS & 0xFFFF); pVersion->wBuild = (WORD) (fixedFileInfo.dwProductVersionLS >> 16); pVersion->wRevision = (WORD) (fixedFileInfo.dwProductVersionLS & 
0xFFFF); } // Now grab the special clr debug info resource // We may need to scan a few different names searching though... // 1) CLRDEBUGINFO<host_os><host_arch> where host_os = 'WINDOWS' or 'CORESYS' and host_arch = 'X86' or 'ARM' or 'AMD64' // 2) For back-compat if the host os is windows and the host architecture matches the target then CLRDEBUGINFO is used with no suffix. DWORD debugResourceRVA = 0; DWORD debugResourceSize = 0; BOOL useCrossPlatformNaming = FALSE; if(SUCCEEDED(hr)) { // the initial state is that we haven't found a proper resource HRESULT hrGetResource = E_FAIL; // First check for the resource which has type = RC_DATA = 10, name = "CLRDEBUGINFO<host_os><host_arch>", language = 0 #if defined (HOST_WINDOWS) && defined(HOST_X86) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSX86"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_X86) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSX86"); #endif #if defined (HOST_WINDOWS) && defined(HOST_AMD64) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSAMD64"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_AMD64) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSAMD64"); #endif #if defined (HOST_WINDOWS) && defined(HOST_ARM64) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM64"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_ARM64) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM64"); #endif #if defined (HOST_WINDOWS) && defined(HOST_ARM) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_ARM) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM"); #endif hrGetResource = GetResourceRvaFromResourceSectionRvaByName(pDataTarget, moduleBaseAddress, resourceSectionRVA, 10, resourceName, 0, &debugResourceRVA, &debugResourceSize); useCrossPlatformNaming = SUCCEEDED(hrGetResource); #if defined(HOST_WINDOWS) && (defined(HOST_X86) || defined(HOST_AMD64) || defined(HOST_ARM)) #if defined(HOST_X86) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_I386 #elif defined(HOST_AMD64) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_AMD64 #elif defined(HOST_ARM) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_ARMNT #endif // if this is windows, and if host_arch matches target arch then we can fallback to searching for CLRDEBUGINFO on failure if(FAILED(hrGetResource) && (imageFileMachine == _HOST_MACHINE_TYPE)) { hrGetResource = GetResourceRvaFromResourceSectionRvaByName(pDataTarget, moduleBaseAddress, resourceSectionRVA, 10, W("CLRDEBUGINFO"), 0, &debugResourceRVA, &debugResourceSize); } #undef _HOST_MACHINE_TYPE #endif // if the search failed, we don't recognize the CLR if(FAILED(hrGetResource)) hr = CORDBG_E_NOT_CLR; } CLR_DEBUG_RESOURCE debugResource; if(SUCCEEDED(hr) && debugResourceSize != sizeof(debugResource)) { hr = CORDBG_E_NOT_CLR; } // Get the special debug resource from the image and return the results if(SUCCEEDED(hr)) { hr = ReadFromDataTarget(pDataTarget, moduleBaseAddress + debugResourceRVA, (BYTE*)&debugResource, sizeof(debugResource)); } if(SUCCEEDED(hr) && (debugResource.dwVersion != 0)) { hr = CORDBG_E_NOT_CLR; } // The signature needs to match m_skuId exactly, except for m_skuId=CLR_ID_ONECORE_CLR which is // also compatible with the older CLR_ID_PHONE_CLR signature. 
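// For example (illustrative): a debug resource signed CLR_ID_PHONE_CLR read
// by a shim whose m_skuId is CLR_ID_ONECORE_CLR passes the check below, while
// any other signature/SKU mismatch is rejected as CORDBG_E_NOT_CLR.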
if(SUCCEEDED(hr) && (debugResource.signature != m_skuId) && !( (debugResource.signature == CLR_ID_PHONE_CLR) && (m_skuId == CLR_ID_ONECORE_CLR) )) { hr = CORDBG_E_NOT_CLR; } if(SUCCEEDED(hr) && (debugResource.signature != CLR_ID_ONECORE_CLR) && useCrossPlatformNaming) { FormatLongDacModuleName(pDacName, dwDacNameCharCount, imageFileMachine, &fixedFileInfo); swprintf_s(pDbiName, dwDbiNameCharCount, W("%s_%s.dll"), MAIN_DBI_MODULE_NAME_W, W("x86")); } else { if(m_skuId == CLR_ID_V4_DESKTOP) swprintf_s(pDacName, dwDacNameCharCount, W("%s.dll"), CLR_DAC_MODULE_NAME_W); else swprintf_s(pDacName, dwDacNameCharCount, W("%s.dll"), CORECLR_DAC_MODULE_NAME_W); swprintf_s(pDbiName, dwDbiNameCharCount, W("%s.dll"), MAIN_DBI_MODULE_NAME_W); } if(SUCCEEDED(hr)) { *pdwDbiTimeStamp = debugResource.dwDbiTimeStamp; *pdwDbiSizeOfImage = debugResource.dwDbiSizeOfImage; *pdwDacTimeStamp = debugResource.dwDacTimeStamp; *pdwDacSizeOfImage = debugResource.dwDacSizeOfImage; } // any failure should be interpreted as this module not being a CLR if(FAILED(hr)) { return CORDBG_E_NOT_CLR; } else { return S_OK; } } else #endif // !HOST_WINDOWS { swprintf_s(pDacName, dwDacNameCharCount, W("%s"), MAKEDLLNAME_W(CORECLR_DAC_MODULE_NAME_W)); swprintf_s(pDbiName, dwDbiNameCharCount, W("%s"), MAKEDLLNAME_W(MAIN_DBI_MODULE_NAME_W)); pVersion->wMajor = 0; pVersion->wMinor = 0; pVersion->wBuild = 0; pVersion->wRevision = 0; *pdwDbiTimeStamp = 0; *pdwDbiSizeOfImage = 0; *pdwDacTimeStamp = 0; *pdwDacSizeOfImage = 0; return S_OK; } } // Formats the long name for DAC HRESULT CLRDebuggingImpl::FormatLongDacModuleName(_Inout_updates_z_(cchBuffer) WCHAR * pBuffer, DWORD cchBuffer, DWORD targetImageFileMachine, VS_FIXEDFILEINFO * pVersion) { #ifndef HOST_WINDOWS _ASSERTE(!"NYI"); return E_NOTIMPL; #endif #if defined(HOST_X86) const WCHAR* pHostArch = W("x86"); #elif defined(HOST_AMD64) const WCHAR* pHostArch = W("amd64"); #elif defined(HOST_ARM) const WCHAR* pHostArch = W("arm"); #elif defined(HOST_ARM64) const WCHAR* pHostArch = W("arm64"); #else _ASSERTE(!"Unknown host arch"); return E_NOTIMPL; #endif const WCHAR* pDacBaseName = NULL; if(m_skuId == CLR_ID_V4_DESKTOP) pDacBaseName = CLR_DAC_MODULE_NAME_W; else if(m_skuId == CLR_ID_CORECLR || m_skuId == CLR_ID_PHONE_CLR || m_skuId == CLR_ID_ONECORE_CLR) pDacBaseName = CORECLR_DAC_MODULE_NAME_W; else { _ASSERTE(!"Unknown SKU id"); return E_UNEXPECTED; } const WCHAR* pTargetArch = NULL; if(targetImageFileMachine == IMAGE_FILE_MACHINE_I386) { pTargetArch = W("x86"); } else if(targetImageFileMachine == IMAGE_FILE_MACHINE_AMD64) { pTargetArch = W("amd64"); } else if(targetImageFileMachine == IMAGE_FILE_MACHINE_ARMNT) { pTargetArch = W("arm"); } else if(targetImageFileMachine == IMAGE_FILE_MACHINE_ARM64) { pTargetArch = W("arm64"); } else { _ASSERTE(!"Unknown target image file machine type"); return E_INVALIDARG; } const WCHAR* pBuildFlavor = W(""); if(pVersion->dwFileFlags & VS_FF_DEBUG) { if(pVersion->dwFileFlags & VS_FF_SPECIALBUILD) pBuildFlavor = W(".dbg"); else pBuildFlavor = W(".chk"); } // WARNING: if you change the formatting make sure you recalculate the maximum // possible size string and verify callers pass a big enough buffer. 
This doesn't // have to be a tight estimate, just make sure its >= the biggest possible DAC name // and it can be calculated statically DWORD minCchBuffer = (DWORD) wcslen(CLR_DAC_MODULE_NAME_W) + (DWORD) wcslen(CORECLR_DAC_MODULE_NAME_W) + // max name 10 + // max host arch 10 + // max target arch 40 + // max version 10 + // max build flavor (DWORD) wcslen(W("name_host_target_version.flavor.dll")) + // max intermediate formatting chars 1; // null terminator // validate the output buffer is larger than our estimate above _ASSERTE(cchBuffer >= minCchBuffer); if(!(cchBuffer >= minCchBuffer)) return E_INVALIDARG; swprintf_s(pBuffer, cchBuffer, W("%s_%s_%s_%u.%u.%u.%02u%s.dll"), pDacBaseName, pHostArch, pTargetArch, pVersion->dwProductVersionMS >> 16, pVersion->dwProductVersionMS & 0xFFFF, pVersion->dwProductVersionLS >> 16, pVersion->dwProductVersionLS & 0xFFFF, pBuildFlavor); return S_OK; } // An implementation of ICLRDebugging::CanUnloadNow // // Arguments: // hModule - a handle to a module provided earlier by ProvideLibrary // // Returns: // S_OK if the library is no longer in use and can be unloaded, S_FALSE otherwise // STDMETHODIMP CLRDebuggingImpl::CanUnloadNow(HMODULE hModule) { // In V4 at least we don't support any unloading. HRESULT hr = S_FALSE; return hr; } STDMETHODIMP CLRDebuggingImpl::QueryInterface(REFIID riid, void **ppvObject) { HRESULT hr = S_OK; if (riid == __uuidof(IUnknown)) { IUnknown *pItf = static_cast<IUnknown *>(this); pItf->AddRef(); *ppvObject = pItf; } else if (riid == __uuidof(ICLRDebugging)) { ICLRDebugging *pItf = static_cast<ICLRDebugging *>(this); pItf->AddRef(); *ppvObject = pItf; } else hr = E_NOINTERFACE; return hr; } // Standard AddRef implementation ULONG CLRDebuggingImpl::AddRef() { return InterlockedIncrement(&m_cRef); } // Standard Release implementation. ULONG CLRDebuggingImpl::Release() { _ASSERTE(m_cRef > 0); ULONG cRef = InterlockedDecrement(&m_cRef); if (cRef == 0) delete this; // Relies on virtual dtor to work properly. return cRef; }
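For reference, a minimal sketch (UnpackProductVersion is a hypothetical helper, not part of debugshim) of the WORD packing that GetCLRInfo above relies on when it records the product version from VS_FIXEDFILEINFO:

#include <cstdint>

struct ClrVersion
{
    uint16_t major, minor, build, revision;
};

static ClrVersion UnpackProductVersion(uint32_t versionMS, uint32_t versionLS)
{
    ClrVersion v;
    v.major    = (uint16_t)(versionMS >> 16);      // high word of dwProductVersionMS
    v.minor    = (uint16_t)(versionMS & 0xFFFF);   // low word of dwProductVersionMS
    v.build    = (uint16_t)(versionLS >> 16);      // high word of dwProductVersionLS
    v.revision = (uint16_t)(versionLS & 0xFFFF);   // low word of dwProductVersionLS
    return v;
}

// E.g. versionMS == 0x00040000 and versionLS == 0x765B0000 unpack to
// 4.0.30299.0 (0x765B == 30299).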
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // debugshim.cpp // // //***************************************************************************** #include "debugshim.h" #include "dbgutil.h" #include <crtdbg.h> #include <clrinternal.h> //has the CLR_ID_V4_DESKTOP guid in it #include "palclr.h" //***************************************************************************** // CLRDebuggingImpl implementation (ICLRDebugging) //***************************************************************************** typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcessImpl2FnPtr)(ULONG64 clrInstanceId, IUnknown * pDataTarget, LPCWSTR pDacModulePath, CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion, REFIID riid, IUnknown ** ppInstance, CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags); typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcessImplFnPtr)(ULONG64 clrInstanceId, IUnknown * pDataTarget, HMODULE hDacDll, CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion, REFIID riid, IUnknown ** ppInstance, CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags); typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcess2FnPtr)(ULONG64 clrInstanceId, IUnknown * pDataTarget, HMODULE hDacDll, REFIID riid, IUnknown ** ppInstance, CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags); typedef HMODULE (STDAPICALLTYPE *LoadLibraryWFnPtr)(LPCWSTR lpLibFileName); static bool IsTargetWindows(ICorDebugDataTarget* pDataTarget) { CorDebugPlatform targetPlatform; HRESULT result = pDataTarget->GetPlatform(&targetPlatform); if(FAILED(result)) { _ASSERTE(!"Unexpected error"); return false; } switch (targetPlatform) { case CORDB_PLATFORM_WINDOWS_X86: case CORDB_PLATFORM_WINDOWS_AMD64: case CORDB_PLATFORM_WINDOWS_IA64: case CORDB_PLATFORM_WINDOWS_ARM: case CORDB_PLATFORM_WINDOWS_ARM64: return true; default: return false; } } // Implementation of ICLRDebugging::OpenVirtualProcess // // Arguments: // moduleBaseAddress - the address of the module which might be a CLR // pDataTarget - the data target for inspecting the process // pLibraryProvider - a callback for locating DBI and DAC // pMaxDebuggerSupportedVersion - the max version of the CLR that this debugger will support debugging // riidProcess - the IID of the interface that should be passed back in ppProcess // ppProcess - output for the ICorDebugProcess# if this module is a CLR // pVersion - the CLR version if this module is a CLR // pFlags - output, see the CLR_DEBUGGING_PROCESS_FLAGS for more details. 
Right now this has only one possible // value which indicates this runtime had an unhandled exception STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess( ULONG64 moduleBaseAddress, IUnknown * pDataTarget, ICLRDebuggingLibraryProvider * pLibraryProvider, CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion, REFIID riidProcess, IUnknown ** ppProcess, CLR_DEBUGGING_VERSION * pVersion, CLR_DEBUGGING_PROCESS_FLAGS * pFlags) { //PRECONDITION(CheckPointer(pDataTarget)); HRESULT hr = S_OK; ICorDebugDataTarget * pDt = NULL; HMODULE hDbi = NULL; HMODULE hDac = NULL; LPWSTR pDacModulePath = NULL; LPWSTR pDbiModulePath = NULL; DWORD dbiTimestamp; DWORD dbiSizeOfImage; WCHAR dbiName[MAX_PATH_FNAME] = { 0 }; DWORD dacTimestamp; DWORD dacSizeOfImage; WCHAR dacName[MAX_PATH_FNAME] = { 0 }; CLR_DEBUGGING_VERSION version; BOOL versionSupportedByCaller = FALSE; // argument checking if ((ppProcess != NULL || pFlags != NULL) && pLibraryProvider == NULL) { hr = E_POINTER; // the library provider must be specified if either // ppProcess or pFlags is non-NULL } else if ((ppProcess != NULL || pFlags != NULL) && pMaxDebuggerSupportedVersion == NULL) { hr = E_POINTER; // the max supported version must be specified if either // ppProcess or pFlags is non-NULL } else if (pVersion != NULL && pVersion->wStructVersion != 0) { hr = CORDBG_E_UNSUPPORTED_VERSION_STRUCT; } else if (FAILED(pDataTarget->QueryInterface(__uuidof(ICorDebugDataTarget), (void**)&pDt))) { hr = CORDBG_E_MISSING_DATA_TARGET_INTERFACE; } if (SUCCEEDED(hr)) { // get CLR version // The expectation is that new versions of the CLR will continue to use the same GUID // (unless there's a reason to hide them from older shims), but debuggers will tell us the // CLR version they're designed for and mscordbi.dll can decide whether or not to accept it. 
version.wStructVersion = 0; hr = GetCLRInfo(pDt, moduleBaseAddress, &version, &dbiTimestamp, &dbiSizeOfImage, dbiName, MAX_PATH_FNAME, &dacTimestamp, &dacSizeOfImage, dacName, MAX_PATH_FNAME); } // If we need to fetch either the process info or the flags info then we need to find // mscordbi and DAC and do the version specific OVP work if (SUCCEEDED(hr) && (ppProcess != NULL || pFlags != NULL)) { ICLRDebuggingLibraryProvider2* pLibraryProvider2; if (SUCCEEDED(pLibraryProvider->QueryInterface(__uuidof(ICLRDebuggingLibraryProvider2), (void**)&pLibraryProvider2))) { if (FAILED(pLibraryProvider2->ProvideLibrary2(dbiName, dbiTimestamp, dbiSizeOfImage, &pDbiModulePath)) || pDbiModulePath == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } if (SUCCEEDED(hr)) { hDbi = LoadLibraryW(pDbiModulePath); if (hDbi == NULL) { hr = HRESULT_FROM_WIN32(GetLastError()); } } if (SUCCEEDED(hr)) { // Adjust the timestamp and size of image if this DAC is a known buggy version and needs to be retargeted RetargetDacIfNeeded(&dacTimestamp, &dacSizeOfImage); // Ask library provider for dac if (FAILED(pLibraryProvider2->ProvideLibrary2(dacName, dacTimestamp, dacSizeOfImage, &pDacModulePath)) || pDacModulePath == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } if (SUCCEEDED(hr)) { hDac = LoadLibraryW(pDacModulePath); if (hDac == NULL) { hr = HRESULT_FROM_WIN32(GetLastError()); } } } pLibraryProvider2->Release(); } else { // Ask library provider for dbi if (FAILED(pLibraryProvider->ProvideLibrary(dbiName, dbiTimestamp, dbiSizeOfImage, &hDbi)) || hDbi == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } if (SUCCEEDED(hr)) { // Adjust the timestamp and size of image if this DAC is a known buggy version and needs to be retargeted RetargetDacIfNeeded(&dacTimestamp, &dacSizeOfImage); // ask library provider for dac if (FAILED(pLibraryProvider->ProvideLibrary(dacName, dacTimestamp, dacSizeOfImage, &hDac)) || hDac == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } } } *ppProcess = NULL; if (SUCCEEDED(hr) && pDacModulePath != NULL) { // Get access to the latest OVP implementation and call it OpenVirtualProcessImpl2FnPtr ovpFn = (OpenVirtualProcessImpl2FnPtr)GetProcAddress(hDbi, "OpenVirtualProcessImpl2"); if (ovpFn != NULL) { hr = ovpFn(moduleBaseAddress, pDataTarget, pDacModulePath, pMaxDebuggerSupportedVersion, riidProcess, ppProcess, pFlags); if (FAILED(hr)) { _ASSERTE(ppProcess == NULL || *ppProcess == NULL); _ASSERTE(pFlags == NULL || *pFlags == 0); } } #ifdef HOST_UNIX else { // On Linux/MacOS the DAC module handle needs to be re-created using the DAC PAL instance // before being passed to DBI's OpenVirtualProcess* implementation. The DBI and DAC share // the same PAL where dbgshim has it's own. LoadLibraryWFnPtr loadLibraryWFn = (LoadLibraryWFnPtr)GetProcAddress(hDac, "LoadLibraryW"); if (loadLibraryWFn != NULL) { hDac = loadLibraryWFn(pDacModulePath); if (hDac == NULL) { hr = E_HANDLE; } } else { hr = E_HANDLE; } } #endif // HOST_UNIX } // If no errors so far and "OpenVirtualProcessImpl2" doesn't exist if (SUCCEEDED(hr) && *ppProcess == NULL) { // Get access to OVP and call it OpenVirtualProcessImplFnPtr ovpFn = (OpenVirtualProcessImplFnPtr)GetProcAddress(hDbi, "OpenVirtualProcessImpl"); if (ovpFn == NULL) { // Fallback to CLR v4 Beta1 path, but skip some of the checking we'd normally do (maxSupportedVersion, etc.) 
OpenVirtualProcess2FnPtr ovp2Fn = (OpenVirtualProcess2FnPtr)GetProcAddress(hDbi, "OpenVirtualProcess2"); if (ovp2Fn == NULL) { hr = CORDBG_E_LIBRARY_PROVIDER_ERROR; } else { hr = ovp2Fn(moduleBaseAddress, pDataTarget, hDac, riidProcess, ppProcess, pFlags); } } else { // Have a CLR v4 Beta2+ DBI, call it and let it do the version check hr = ovpFn(moduleBaseAddress, pDataTarget, hDac, pMaxDebuggerSupportedVersion, riidProcess, ppProcess, pFlags); if (FAILED(hr)) { _ASSERTE(ppProcess == NULL || *ppProcess == NULL); _ASSERTE(pFlags == NULL || *pFlags == 0); } } } } //version is still valid in some failure cases if (pVersion != NULL && (SUCCEEDED(hr) || (hr == CORDBG_E_UNSUPPORTED_DEBUGGING_MODEL) || (hr == CORDBG_E_UNSUPPORTED_FORWARD_COMPAT))) { memcpy(pVersion, &version, sizeof(CLR_DEBUGGING_VERSION)); } if (pDacModulePath != NULL) { #ifdef HOST_UNIX free(pDacModulePath); #else CoTaskMemFree(pDacModulePath); #endif } if (pDbiModulePath != NULL) { #ifdef HOST_UNIX free(pDbiModulePath); #else CoTaskMemFree(pDbiModulePath); #endif } // free the data target we QI'ed earlier if (pDt != NULL) { pDt->Release(); } return hr; } // Checks to see if this DAC is one of a known set of old DAC builds which contains an issue. // If so we retarget to a newer compatible version which has the bug fixed. This is done // by changing the PE information used to lookup the DAC. // // Arguments // pdwTimeStamp - on input, the timestamp of DAC as embedded in the CLR image // on output, a potentially new timestamp for an updated DAC to use // instead // pdwSizeOfImage - on input, the sizeOfImage of DAC as embedded in the CLR image // on output, a potentially new sizeOfImage for an updated DAC to use // instead VOID CLRDebuggingImpl::RetargetDacIfNeeded(DWORD* pdwTimeStamp, DWORD* pdwSizeOfImage) { // This code is auto generated by the CreateRetargetTable tool // on 3/4/2011 6:35 PM // and then copy-pasted here. // // // // Retarget the GDR1 amd64 build if( (*pdwTimeStamp == 0x4d536868) && (*pdwSizeOfImage == 0x17b000)) { *pdwTimeStamp = 0x4d71a160; *pdwSizeOfImage = 0x17b000; } // Retarget the GDR1 x86 build else if( (*pdwTimeStamp == 0x4d5368f2) && (*pdwSizeOfImage == 0x120000)) { *pdwTimeStamp = 0x4d71a14f; *pdwSizeOfImage = 0x120000; } // Retarget the RTM amd64 build else if( (*pdwTimeStamp == 0x4ba21fa7) && (*pdwSizeOfImage == 0x17b000)) { *pdwTimeStamp = 0x4d71a13c; *pdwSizeOfImage = 0x17b000; } // Retarget the RTM x86 build else if( (*pdwTimeStamp == 0x4ba1da25) && (*pdwSizeOfImage == 0x120000)) { *pdwTimeStamp = 0x4d71a128; *pdwSizeOfImage = 0x120000; } // This code is auto generated by the CreateRetargetTable tool // on 8/17/2011 1:28 AM // and then copy-pasted here. 
// // // // Retarget the GDR2 amd64 build else if( (*pdwTimeStamp == 0x4da428c7) && (*pdwSizeOfImage == 0x17b000)) { *pdwTimeStamp = 0x4e4b7bc2; *pdwSizeOfImage = 0x17b000; } // Retarget the GDR2 x86 build else if( (*pdwTimeStamp == 0x4da3fe52) && (*pdwSizeOfImage == 0x120000)) { *pdwTimeStamp = 0x4e4b7bb1; *pdwSizeOfImage = 0x120000; } // End auto-generated code } #define PE_FIXEDFILEINFO_SIGNATURE 0xFEEF04BD // The format of the special debugging resource we embed in CLRs starting in // v4 struct CLR_DEBUG_RESOURCE { DWORD dwVersion; GUID signature; DWORD dwDacTimeStamp; DWORD dwDacSizeOfImage; DWORD dwDbiTimeStamp; DWORD dwDbiSizeOfImage; }; // Checks to see if a module is a CLR and if so, fetches the debug data // from the embedded resource // // Arguments // pDataTarget - dataTarget for the process we are inspecting // moduleBaseAddress - base address of a module we should inspect // pVersion - output, the version of the CLR detected if this is a CLR // pdwDbiTimeStamp - the timestamp of DBI as embedded in the CLR image // pdwDbiSizeOfImage - the SizeOfImage of DBI as embedded in the CLR image // pDbiName - output, the filename of DBI (as calculated by this function but that might change) // dwDbiNameCharCount - input, the number of WCHARs in the buffer pointed to by pDbiName // pdwDacTimeStamp - the timestamp of DAC as embedded in the CLR image // pdwDacSizeOfImage - the SizeOfImage of DAC as embedded in the CLR image // pDacName - output, the filename of DAC (as calculated by this function but that might change) // dwDacNameCharCount - input, the number of WCHARs in the buffer pointed to by pDacName HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget, ULONG64 moduleBaseAddress, CLR_DEBUGGING_VERSION* pVersion, DWORD* pdwDbiTimeStamp, DWORD* pdwDbiSizeOfImage, _Inout_updates_z_(dwDbiNameCharCount) WCHAR* pDbiName, DWORD dwDbiNameCharCount, DWORD* pdwDacTimeStamp, DWORD* pdwDacSizeOfImage, _Inout_updates_z_(dwDacNameCharCount) WCHAR* pDacName, DWORD dwDacNameCharCount) { #ifdef HOST_WINDOWS if(IsTargetWindows(pDataTarget)) { WORD imageFileMachine = 0; DWORD resourceSectionRVA = 0; HRESULT hr = GetMachineAndResourceSectionRVA(pDataTarget, moduleBaseAddress, &imageFileMachine, &resourceSectionRVA); // We want the version resource which has type = RT_VERSION = 16, name = 1, language = 0x409 DWORD versionResourceRVA = 0; DWORD versionResourceSize = 0; if(SUCCEEDED(hr)) { hr = GetResourceRvaFromResourceSectionRva(pDataTarget, moduleBaseAddress, resourceSectionRVA, 16, 1, 0x409, &versionResourceRVA, &versionResourceSize); } // At last we get our version info VS_FIXEDFILEINFO fixedFileInfo = {0}; if(SUCCEEDED(hr)) { // The version resource has 3 words, then the unicode string "VS_VERSION_INFO" // (16 WCHARS including the null terminator) // then padding to a 32-bit boundary, then the VS_FIXEDFILEINFO struct DWORD fixedFileInfoRVA = ((versionResourceRVA + 3*2 + 16*2 + 3)/4)*4; hr = ReadFromDataTarget(pDataTarget, moduleBaseAddress + fixedFileInfoRVA, (BYTE*)&fixedFileInfo, sizeof(fixedFileInfo)); } // Verify the signature on the version resource if(SUCCEEDED(hr) && fixedFileInfo.dwSignature != PE_FIXEDFILEINFO_SIGNATURE) { hr = CORDBG_E_NOT_CLR; } // Record the version information if(SUCCEEDED(hr)) { pVersion->wMajor = (WORD) (fixedFileInfo.dwProductVersionMS >> 16); pVersion->wMinor = (WORD) (fixedFileInfo.dwProductVersionMS & 0xFFFF); pVersion->wBuild = (WORD) (fixedFileInfo.dwProductVersionLS >> 16); pVersion->wRevision = (WORD) (fixedFileInfo.dwProductVersionLS &
0xFFFF); } // Now grab the special clr debug info resource // We may need to scan a few different names searching though... // 1) CLRDEBUGINFO<host_os><host_arch> where host_os = 'WINDOWS' or 'CORESYS' and host_arch = 'X86' or 'ARM' or 'AMD64' // 2) For back-compat if the host os is windows and the host architecture matches the target then CLRDEBUGINFO is used with no suffix. DWORD debugResourceRVA = 0; DWORD debugResourceSize = 0; BOOL useCrossPlatformNaming = FALSE; if(SUCCEEDED(hr)) { // the initial state is that we haven't found a proper resource HRESULT hrGetResource = E_FAIL; // First check for the resource which has type = RC_DATA = 10, name = "CLRDEBUGINFO<host_os><host_arch>", language = 0 #if defined (HOST_WINDOWS) && defined(HOST_X86) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSX86"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_X86) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSX86"); #endif #if defined (HOST_WINDOWS) && defined(HOST_AMD64) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSAMD64"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_AMD64) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSAMD64"); #endif #if defined (HOST_WINDOWS) && defined(HOST_ARM64) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM64"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_ARM64) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM64"); #endif #if defined (HOST_WINDOWS) && defined(HOST_ARM) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM"); #endif #if !defined (HOST_WINDOWS) && defined(HOST_ARM) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM"); #endif hrGetResource = GetResourceRvaFromResourceSectionRvaByName(pDataTarget, moduleBaseAddress, resourceSectionRVA, 10, resourceName, 0, &debugResourceRVA, &debugResourceSize); useCrossPlatformNaming = SUCCEEDED(hrGetResource); #if defined(HOST_WINDOWS) && (defined(HOST_X86) || defined(HOST_AMD64) || defined(HOST_ARM)) #if defined(HOST_X86) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_I386 #elif defined(HOST_AMD64) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_AMD64 #elif defined(HOST_ARM) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_ARMNT #endif // if this is windows, and if host_arch matches target arch then we can fallback to searching for CLRDEBUGINFO on failure if(FAILED(hrGetResource) && (imageFileMachine == _HOST_MACHINE_TYPE)) { hrGetResource = GetResourceRvaFromResourceSectionRvaByName(pDataTarget, moduleBaseAddress, resourceSectionRVA, 10, W("CLRDEBUGINFO"), 0, &debugResourceRVA, &debugResourceSize); } #undef _HOST_MACHINE_TYPE #endif // if the search failed, we don't recognize the CLR if(FAILED(hrGetResource)) hr = CORDBG_E_NOT_CLR; } CLR_DEBUG_RESOURCE debugResource; if(SUCCEEDED(hr) && debugResourceSize != sizeof(debugResource)) { hr = CORDBG_E_NOT_CLR; } // Get the special debug resource from the image and return the results if(SUCCEEDED(hr)) { hr = ReadFromDataTarget(pDataTarget, moduleBaseAddress + debugResourceRVA, (BYTE*)&debugResource, sizeof(debugResource)); } if(SUCCEEDED(hr) && (debugResource.dwVersion != 0)) { hr = CORDBG_E_NOT_CLR; } // The signature needs to match m_skuId exactly, except for m_skuId=CLR_ID_ONECORE_CLR which is // also compatible with the older CLR_ID_PHONE_CLR signature. 
if(SUCCEEDED(hr) && (debugResource.signature != m_skuId) && !( (debugResource.signature == CLR_ID_PHONE_CLR) && (m_skuId == CLR_ID_ONECORE_CLR) )) { hr = CORDBG_E_NOT_CLR; } if(SUCCEEDED(hr) && (debugResource.signature != CLR_ID_ONECORE_CLR) && useCrossPlatformNaming) { FormatLongDacModuleName(pDacName, dwDacNameCharCount, imageFileMachine, &fixedFileInfo); swprintf_s(pDbiName, dwDbiNameCharCount, W("%s_%s.dll"), MAIN_DBI_MODULE_NAME_W, W("x86")); } else { if(m_skuId == CLR_ID_V4_DESKTOP) swprintf_s(pDacName, dwDacNameCharCount, W("%s.dll"), CLR_DAC_MODULE_NAME_W); else swprintf_s(pDacName, dwDacNameCharCount, W("%s.dll"), CORECLR_DAC_MODULE_NAME_W); swprintf_s(pDbiName, dwDbiNameCharCount, W("%s.dll"), MAIN_DBI_MODULE_NAME_W); } if(SUCCEEDED(hr)) { *pdwDbiTimeStamp = debugResource.dwDbiTimeStamp; *pdwDbiSizeOfImage = debugResource.dwDbiSizeOfImage; *pdwDacTimeStamp = debugResource.dwDacTimeStamp; *pdwDacSizeOfImage = debugResource.dwDacSizeOfImage; } // any failure should be interpreted as this module not being a CLR if(FAILED(hr)) { return CORDBG_E_NOT_CLR; } else { return S_OK; } } else #endif // !HOST_WINDOWS { swprintf_s(pDacName, dwDacNameCharCount, W("%s"), MAKEDLLNAME_W(CORECLR_DAC_MODULE_NAME_W)); swprintf_s(pDbiName, dwDbiNameCharCount, W("%s"), MAKEDLLNAME_W(MAIN_DBI_MODULE_NAME_W)); pVersion->wMajor = 0; pVersion->wMinor = 0; pVersion->wBuild = 0; pVersion->wRevision = 0; *pdwDbiTimeStamp = 0; *pdwDbiSizeOfImage = 0; *pdwDacTimeStamp = 0; *pdwDacSizeOfImage = 0; return S_OK; } } // Formats the long name for DAC HRESULT CLRDebuggingImpl::FormatLongDacModuleName(_Inout_updates_z_(cchBuffer) WCHAR * pBuffer, DWORD cchBuffer, DWORD targetImageFileMachine, VS_FIXEDFILEINFO * pVersion) { #ifndef HOST_WINDOWS _ASSERTE(!"NYI"); return E_NOTIMPL; #endif #if defined(HOST_X86) const WCHAR* pHostArch = W("x86"); #elif defined(HOST_AMD64) const WCHAR* pHostArch = W("amd64"); #elif defined(HOST_ARM) const WCHAR* pHostArch = W("arm"); #elif defined(HOST_ARM64) const WCHAR* pHostArch = W("arm64"); #else _ASSERTE(!"Unknown host arch"); return E_NOTIMPL; #endif const WCHAR* pDacBaseName = NULL; if(m_skuId == CLR_ID_V4_DESKTOP) pDacBaseName = CLR_DAC_MODULE_NAME_W; else if(m_skuId == CLR_ID_CORECLR || m_skuId == CLR_ID_PHONE_CLR || m_skuId == CLR_ID_ONECORE_CLR) pDacBaseName = CORECLR_DAC_MODULE_NAME_W; else { _ASSERTE(!"Unknown SKU id"); return E_UNEXPECTED; } const WCHAR* pTargetArch = NULL; if(targetImageFileMachine == IMAGE_FILE_MACHINE_I386) { pTargetArch = W("x86"); } else if(targetImageFileMachine == IMAGE_FILE_MACHINE_AMD64) { pTargetArch = W("amd64"); } else if(targetImageFileMachine == IMAGE_FILE_MACHINE_ARMNT) { pTargetArch = W("arm"); } else if(targetImageFileMachine == IMAGE_FILE_MACHINE_ARM64) { pTargetArch = W("arm64"); } else { _ASSERTE(!"Unknown target image file machine type"); return E_INVALIDARG; } const WCHAR* pBuildFlavor = W(""); if(pVersion->dwFileFlags & VS_FF_DEBUG) { if(pVersion->dwFileFlags & VS_FF_SPECIALBUILD) pBuildFlavor = W(".dbg"); else pBuildFlavor = W(".chk"); } // WARNING: if you change the formatting make sure you recalculate the maximum // possible size string and verify callers pass a big enough buffer. 
This doesn't // have to be a tight estimate, just make sure it's >= the biggest possible DAC name // and it can be calculated statically DWORD minCchBuffer = (DWORD) wcslen(CLR_DAC_MODULE_NAME_W) + (DWORD) wcslen(CORECLR_DAC_MODULE_NAME_W) + // max name 10 + // max host arch 10 + // max target arch 40 + // max version 10 + // max build flavor (DWORD) wcslen(W("name_host_target_version.flavor.dll")) + // max intermediate formatting chars 1; // null terminator // validate the output buffer is larger than our estimate above _ASSERTE(cchBuffer >= minCchBuffer); if(!(cchBuffer >= minCchBuffer)) return E_INVALIDARG; swprintf_s(pBuffer, cchBuffer, W("%s_%s_%s_%u.%u.%u.%02u%s.dll"), pDacBaseName, pHostArch, pTargetArch, pVersion->dwProductVersionMS >> 16, pVersion->dwProductVersionMS & 0xFFFF, pVersion->dwProductVersionLS >> 16, pVersion->dwProductVersionLS & 0xFFFF, pBuildFlavor); return S_OK; } // An implementation of ICLRDebugging::CanUnloadNow // // Arguments: // hModule - a handle to a module provided earlier by ProvideLibrary // // Returns: // S_OK if the library is no longer in use and can be unloaded, S_FALSE otherwise // STDMETHODIMP CLRDebuggingImpl::CanUnloadNow(HMODULE hModule) { // In V4 at least we don't support any unloading. HRESULT hr = S_FALSE; return hr; } STDMETHODIMP CLRDebuggingImpl::QueryInterface(REFIID riid, void **ppvObject) { HRESULT hr = S_OK; if (riid == __uuidof(IUnknown)) { IUnknown *pItf = static_cast<IUnknown *>(this); pItf->AddRef(); *ppvObject = pItf; } else if (riid == __uuidof(ICLRDebugging)) { ICLRDebugging *pItf = static_cast<ICLRDebugging *>(this); pItf->AddRef(); *ppvObject = pItf; } else hr = E_NOINTERFACE; return hr; } // Standard AddRef implementation ULONG CLRDebuggingImpl::AddRef() { return InterlockedIncrement(&m_cRef); } // Standard Release implementation. ULONG CLRDebuggingImpl::Release() { _ASSERTE(m_cRef > 0); ULONG cRef = InterlockedDecrement(&m_cRef); if (cRef == 0) delete this; // Relies on virtual dtor to work properly. return cRef; }
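GetCLRInfo above locates VS_FIXEDFILEINFO with the expression ((versionResourceRVA + 3*2 + 16*2 + 3)/4)*4: skip the 3 leading WORDs (6 bytes) and the UTF-16 "VS_VERSION_INFO" string (16 WCHARs, 32 bytes), then round up to the next 4-byte boundary. A minimal sketch of the same arithmetic; AlignUp and the sample RVA are illustrative, not from the source:

#include <cassert>
#include <cstdint>

// Round v up to the next multiple of align (align must be a power of two).
static uint32_t AlignUp(uint32_t v, uint32_t align)
{
    return (v + align - 1) & ~(align - 1);
}

int main()
{
    uint32_t versionResourceRVA = 0x1001; // hypothetical, deliberately misaligned
    uint32_t viaFormula = ((versionResourceRVA + 3 * 2 + 16 * 2 + 3) / 4) * 4;
    uint32_t viaHelper  = AlignUp(versionResourceRVA + 3 * 2 + 16 * 2, 4);
    assert(viaFormula == viaHelper && viaFormula == 0x1028); // both round 0x1027 up to 0x1028
    return 0;
}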
-1
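The if/else chain in RetargetDacIfNeeded above was machine-generated and reads naturally as a (timestamp, sizeOfImage) lookup table. A sketch of the table-driven equivalent, with the entries copied from the chain (struct and function names are hypothetical):

#include <cstdint>

struct RetargetEntry
{
    uint32_t fromTimeStamp;
    uint32_t fromSizeOfImage;
    uint32_t toTimeStamp;
    uint32_t toSizeOfImage;
};

static const RetargetEntry s_dacRetargets[] =
{
    { 0x4d536868, 0x17b000, 0x4d71a160, 0x17b000 }, // GDR1 amd64
    { 0x4d5368f2, 0x120000, 0x4d71a14f, 0x120000 }, // GDR1 x86
    { 0x4ba21fa7, 0x17b000, 0x4d71a13c, 0x17b000 }, // RTM amd64
    { 0x4ba1da25, 0x120000, 0x4d71a128, 0x120000 }, // RTM x86
    { 0x4da428c7, 0x17b000, 0x4e4b7bc2, 0x17b000 }, // GDR2 amd64
    { 0x4da3fe52, 0x120000, 0x4e4b7bb1, 0x120000 }, // GDR2 x86
};

// Scan the table and rewrite the PE lookup info if this DAC is a known buggy build.
static void RetargetDacTableDriven(uint32_t* pdwTimeStamp, uint32_t* pdwSizeOfImage)
{
    for (const RetargetEntry& e : s_dacRetargets)
    {
        if (*pdwTimeStamp == e.fromTimeStamp && *pdwSizeOfImage == e.fromSizeOfImage)
        {
            *pdwTimeStamp   = e.toTimeStamp;
            *pdwSizeOfImage = e.toSizeOfImage;
            return;
        }
    }
}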
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
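A generic illustration of the alignment rule this fix concerns (not the JIT's actual code): when stack offsets are assigned to parameters, 8-byte types on ARM32 must start on 8-byte boundaries, so padding holes can appear between parameters:

#include <cstdint>
#include <cstdio>

int main()
{
    struct Param { const char* name; uint32_t size; uint32_t align; };
    const Param params[] = {
        { "int a",    4, 4 },
        { "double b", 8, 8 }, // cannot sit at offset 4; a padding hole is inserted
        { "int c",    4, 4 },
    };

    uint32_t offset = 0;
    for (const Param& p : params)
    {
        offset = (offset + p.align - 1) & ~(p.align - 1);             // round up to alignment
        std::printf("%-8s -> offset %u\n", p.name, (unsigned)offset); // prints 0, 8, 16
        offset += p.size;
    }
    return 0;
}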
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/native/public/mono/metadata/details/mono-gc-types.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #ifndef _MONO_METADATA_MONO_GC_TYPES_H #define _MONO_METADATA_MONO_GC_TYPES_H #include <mono/metadata/details/object-types.h> MONO_BEGIN_DECLS typedef int (*MonoGCReferences) (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data); /** * This enum is used by the profiler API when reporting root registration. */ typedef enum { /** * Roots external to Mono. Embedders may only use this value. */ MONO_ROOT_SOURCE_EXTERNAL = 0, /** * Thread call stack. * * The \c key parameter is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_STACK = 1, /** * Roots in the finalizer queue. This is a pseudo-root. */ MONO_ROOT_SOURCE_FINALIZER_QUEUE = 2, /** * Managed \c static variables. * * The \c key parameter is a \c MonoVTable pointer. */ MONO_ROOT_SOURCE_STATIC = 3, /** * Managed \c static variables with \c ThreadStaticAttribute. * * The \c key parameter is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_THREAD_STATIC = 4, /** * Managed \c static variables with \c ContextStaticAttribute. * * The \c key parameter is a \c MonoAppContext pointer. */ MONO_ROOT_SOURCE_CONTEXT_STATIC = 5, /** * \c GCHandle structures. */ MONO_ROOT_SOURCE_GC_HANDLE = 6, /** * Roots in the just-in-time compiler. */ MONO_ROOT_SOURCE_JIT = 7, /** * Roots in the threading subsystem. * * The \c key parameter, if not \c NULL, is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_THREADING = 8, /** * Roots in application domains. * * The \c key parameter, if not \c NULL, is a \c MonoDomain pointer. */ MONO_ROOT_SOURCE_DOMAIN = 9, /** * Roots in reflection code. * * The \c key parameter, if not \c NULL, is a \c MonoVTable pointer. */ MONO_ROOT_SOURCE_REFLECTION = 10, /** * Roots from P/Invoke or other marshaling infrastructure. */ MONO_ROOT_SOURCE_MARSHAL = 11, /** * Roots in the thread pool data structures. */ MONO_ROOT_SOURCE_THREAD_POOL = 12, /** * Roots in the debugger agent. */ MONO_ROOT_SOURCE_DEBUGGER = 13, /** * Roots in the runtime handle stack. This is a pseudo-root. * * The \c key parameter is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_HANDLE = 14, /** * Roots in the ephemeron arrays. This is a pseudo-root. */ MONO_ROOT_SOURCE_EPHEMERON = 15, /** * Roots in the toggleref arrays. This is a pseudo-root. */ MONO_ROOT_SOURCE_TOGGLEREF = 16, } MonoGCRootSource; typedef enum { MONO_GC_HANDLE_TYPE_MIN = 0, MONO_GC_HANDLE_WEAK = MONO_GC_HANDLE_TYPE_MIN, MONO_GC_HANDLE_WEAK_TRACK_RESURRECTION, MONO_GC_HANDLE_NORMAL, MONO_GC_HANDLE_PINNED, MONO_GC_HANDLE_TYPE_MAX, } MonoGCHandleType; MONO_END_DECLS #endif /* _MONO_METADATA_MONO_GC_TYPES_H */
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #ifndef _MONO_METADATA_MONO_GC_TYPES_H #define _MONO_METADATA_MONO_GC_TYPES_H #include <mono/metadata/details/object-types.h> MONO_BEGIN_DECLS typedef int (*MonoGCReferences) (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data); /** * This enum is used by the profiler API when reporting root registration. */ typedef enum { /** * Roots external to Mono. Embedders may only use this value. */ MONO_ROOT_SOURCE_EXTERNAL = 0, /** * Thread call stack. * * The \c key parameter is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_STACK = 1, /** * Roots in the finalizer queue. This is a pseudo-root. */ MONO_ROOT_SOURCE_FINALIZER_QUEUE = 2, /** * Managed \c static variables. * * The \c key parameter is a \c MonoVTable pointer. */ MONO_ROOT_SOURCE_STATIC = 3, /** * Managed \c static variables with \c ThreadStaticAttribute. * * The \c key parameter is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_THREAD_STATIC = 4, /** * Managed \c static variables with \c ContextStaticAttribute. * * The \c key parameter is a \c MonoAppContext pointer. */ MONO_ROOT_SOURCE_CONTEXT_STATIC = 5, /** * \c GCHandle structures. */ MONO_ROOT_SOURCE_GC_HANDLE = 6, /** * Roots in the just-in-time compiler. */ MONO_ROOT_SOURCE_JIT = 7, /** * Roots in the threading subsystem. * * The \c key parameter, if not \c NULL, is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_THREADING = 8, /** * Roots in application domains. * * The \c key parameter, if not \c NULL, is a \c MonoDomain pointer. */ MONO_ROOT_SOURCE_DOMAIN = 9, /** * Roots in reflection code. * * The \c key parameter, if not \c NULL, is a \c MonoVTable pointer. */ MONO_ROOT_SOURCE_REFLECTION = 10, /** * Roots from P/Invoke or other marshaling infrastructure. */ MONO_ROOT_SOURCE_MARSHAL = 11, /** * Roots in the thread pool data structures. */ MONO_ROOT_SOURCE_THREAD_POOL = 12, /** * Roots in the debugger agent. */ MONO_ROOT_SOURCE_DEBUGGER = 13, /** * Roots in the runtime handle stack. This is a pseudo-root. * * The \c key parameter is a thread ID as a \c uintptr_t. */ MONO_ROOT_SOURCE_HANDLE = 14, /** * Roots in the ephemeron arrays. This is a pseudo-root. */ MONO_ROOT_SOURCE_EPHEMERON = 15, /** * Roots in the toggleref arrays. This is a pseudo-root. */ MONO_ROOT_SOURCE_TOGGLEREF = 16, } MonoGCRootSource; typedef enum { MONO_GC_HANDLE_TYPE_MIN = 0, MONO_GC_HANDLE_WEAK = MONO_GC_HANDLE_TYPE_MIN, MONO_GC_HANDLE_WEAK_TRACK_RESURRECTION, MONO_GC_HANDLE_NORMAL, MONO_GC_HANDLE_PINNED, MONO_GC_HANDLE_TYPE_MAX, } MonoGCHandleType; MONO_END_DECLS #endif /* _MONO_METADATA_MONO_GC_TYPES_H */
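A small, hypothetical helper showing one way a profiler consumer might render the MonoGCRootSource values from the header above; it assumes the public Mono headers are on the include path and uses only the enumerators declared there:

#include <mono/metadata/details/mono-gc-types.h>

static const char* RootSourceName(MonoGCRootSource source)
{
    switch (source)
    {
        case MONO_ROOT_SOURCE_EXTERNAL:        return "external";
        case MONO_ROOT_SOURCE_STACK:           return "stack";
        case MONO_ROOT_SOURCE_FINALIZER_QUEUE: return "finalizer-queue";
        case MONO_ROOT_SOURCE_STATIC:          return "static";
        case MONO_ROOT_SOURCE_THREAD_STATIC:   return "thread-static";
        case MONO_ROOT_SOURCE_CONTEXT_STATIC:  return "context-static";
        case MONO_ROOT_SOURCE_GC_HANDLE:       return "gc-handle";
        case MONO_ROOT_SOURCE_JIT:             return "jit";
        case MONO_ROOT_SOURCE_THREADING:       return "threading";
        case MONO_ROOT_SOURCE_DOMAIN:          return "domain";
        case MONO_ROOT_SOURCE_REFLECTION:      return "reflection";
        case MONO_ROOT_SOURCE_MARSHAL:         return "marshal";
        case MONO_ROOT_SOURCE_THREAD_POOL:     return "thread-pool";
        case MONO_ROOT_SOURCE_DEBUGGER:        return "debugger";
        case MONO_ROOT_SOURCE_HANDLE:          return "handle-stack";
        case MONO_ROOT_SOURCE_EPHEMERON:       return "ephemeron";
        case MONO_ROOT_SOURCE_TOGGLEREF:       return "toggleref";
        default:                               return "unknown";
    }
}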
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/jit/typeinfo.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "_typeinfo.h" bool Compiler::tiCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const { return typeInfo::tiCompatibleWith(info.compCompHnd, child, parent, normalisedForStack); } bool Compiler::tiMergeCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const { return typeInfo::tiMergeCompatibleWith(info.compCompHnd, child, parent, normalisedForStack); } bool Compiler::tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const { return typeInfo::tiMergeToCommonParent(info.compCompHnd, pDest, pSrc, changed); } static bool tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent) { assert(parent.IsByRef()); if (!child.IsByRef()) { return false; } if (child.IsReadonlyByRef() && !parent.IsReadonlyByRef()) { return false; } // Byrefs are compatible if the underlying types are equivalent typeInfo childTarget = ::DereferenceByRef(child); typeInfo parentTarget = ::DereferenceByRef(parent); if (typeInfo::AreEquivalent(childTarget, parentTarget)) { return true; } // Make sure that both types have a valid m_cls if ((childTarget.IsType(TI_REF) || childTarget.IsType(TI_STRUCT)) && (parentTarget.IsType(TI_REF) || parentTarget.IsType(TI_STRUCT))) { return CompHnd->areTypesEquivalent(childTarget.GetClassHandle(), parentTarget.GetClassHandle()); } return false; } /***************************************************************************** * Verify child is compatible with the template parent. Basically, that * child is a "subclass" of parent -it can be substituted for parent * anywhere. Note that if parent contains fancy flags, such as "uninitialized" * , "is this ptr", or "has byref local/field" info, then child must also * contain those flags, otherwise FALSE will be returned ! * * Rules for determining compatibility: * * If parent is a primitive type or value class, then child must be the * same primitive type or value class. The exception is that the built in * value classes System/Boolean etc. are treated as synonyms for * TI_BYTE etc. * * If parent is a byref of a primitive type or value class, then child * must be a byref of the same (rules same as above case). * * Byrefs are compatible only with byrefs. * * If parent is an object, child must be a subclass of it, implement it * (if it is an interface), or be null. * * If parent is an array, child must be the same or subclassed array. * * If parent is a null objref, only null is compatible with it. * * If the "uninitialized", "by ref local/field", "this pointer" or other flags * are different, the items are incompatible. * * parent CANNOT be an undefined (dead) item. 
* */ bool typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent, bool normalisedForStack) { assert(child.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(child), child)); assert(parent.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(parent), parent)); if (typeInfo::AreEquivalent(child, parent)) { return true; } if (parent.IsUnboxedGenericTypeVar() || child.IsUnboxedGenericTypeVar()) { return false; // need to have had child == parent } else if (parent.IsType(TI_REF)) { // An uninitialized objRef is not compatible to initialized. if (child.IsUninitialisedObjRef() && !parent.IsUninitialisedObjRef()) { return false; } if (child.IsNullObjRef()) { // NULL can be any reference type return true; } if (!child.IsType(TI_REF)) { return false; } return CompHnd->canCast(child.m_cls, parent.m_cls); } else if (parent.IsType(TI_METHOD)) { if (!child.IsType(TI_METHOD)) { return false; } // Right now we don't bother merging method handles return false; } else if (parent.IsType(TI_STRUCT)) { if (!child.IsType(TI_STRUCT)) { return false; } // Structures are compatible if they are equivalent return CompHnd->areTypesEquivalent(child.m_cls, parent.m_cls); } else if (parent.IsByRef()) { return tiCompatibleWithByRef(CompHnd, child, parent); } #ifdef TARGET_64BIT // On 64-bit targets we have precise representation for native int, so these rules // represent the fact that the ECMA spec permits the implicit conversion // between an int32 and a native int. else if (parent.IsType(TI_INT) && typeInfo::AreEquivalent(nativeInt(), child)) { return true; } else if (typeInfo::AreEquivalent(nativeInt(), parent) && child.IsType(TI_INT)) { return true; } #endif // TARGET_64BIT return false; } bool typeInfo::tiMergeCompatibleWith(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent, bool normalisedForStack) { if (!child.IsPermanentHomeByRef() && parent.IsPermanentHomeByRef()) { return false; } return typeInfo::tiCompatibleWith(CompHnd, child, parent, normalisedForStack); } /***************************************************************************** * Merge pDest and pSrc to find some commonality (e.g. a common parent). * Copy the result to pDest, marking it dead if no commonality can be found. * * null ^ null -> null * Object ^ null -> Object * [I4 ^ null -> [I4 * InputStream ^ OutputStream -> Stream * InputStream ^ NULL -> InputStream * [I4 ^ Object -> Object * [I4 ^ [Object -> Array * [I4 ^ [R8 -> Array * [Foo ^ I4 -> DEAD * [Foo ^ [I1 -> Array * [InputStream ^ [OutputStream -> Array * DEAD ^ X -> DEAD * [Intfc ^ [OutputStream -> Array * Intf ^ [OutputStream -> Object * [[InStream ^ [[OutStream -> Array * [[InStream ^ [OutStream -> Array * [[Foo ^ [Object -> Array * * Importantly: * [I1 ^ [U1 -> either [I1 or [U1 * etc. * * Also, System/Int32 and I4 merge -> I4, etc. * * Returns FALSE if the merge was completely incompatible (i.e. the item became * dead). * */ bool typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed) { assert(pSrc->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pSrc), *pSrc)); assert(pDest->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pDest), *pDest)); // Merge the auxiliary information like "this" pointer tracking, etc... // Remember the pre-state, so we can tell if it changed. 
*changed = false; DWORD destFlagsBefore = pDest->m_flags; // This bit is only set if both pDest and pSrc have it set pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_THIS_PTR); // This bit is set if either pDest or pSrc have it set pDest->m_flags |= (pSrc->m_flags & TI_FLAG_UNINIT_OBJREF); // This bit is set if either pDest or pSrc have it set pDest->m_flags |= (pSrc->m_flags & TI_FLAG_BYREF_READONLY); // If the byref wasn't permanent home in both sides, then merge won't have the bit set pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_BYREF_PERMANENT_HOME); if (pDest->m_flags != destFlagsBefore) { *changed = true; } // OK the main event. Merge the main types if (typeInfo::AreEquivalent(*pDest, *pSrc)) { return true; } if (pDest->IsUnboxedGenericTypeVar() || pSrc->IsUnboxedGenericTypeVar()) { // Should have had *pDest == *pSrc goto FAIL; } if (pDest->IsType(TI_REF)) { if (pSrc->IsType(TI_NULL)) { // NULL can be any reference type return true; } if (!pSrc->IsType(TI_REF)) { goto FAIL; } // Ask the EE to find the common parent, This always succeeds since System.Object always works CORINFO_CLASS_HANDLE pDestClsBefore = pDest->m_cls; pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle()); if (pDestClsBefore != pDest->m_cls) { *changed = true; } return true; } else if (pDest->IsType(TI_NULL)) { if (pSrc->IsType(TI_REF)) // NULL can be any reference type { *pDest = *pSrc; *changed = true; return true; } goto FAIL; } else if (pDest->IsType(TI_STRUCT)) { if (pSrc->IsType(TI_STRUCT) && CompHnd->areTypesEquivalent(pDest->GetClassHandle(), pSrc->GetClassHandle())) { return true; } goto FAIL; } else if (pDest->IsByRef()) { return tiCompatibleWithByRef(CompHnd, *pSrc, *pDest); } #ifdef TARGET_64BIT // On 64-bit targets we have precise representation for native int, so these rules // represent the fact that the ECMA spec permits the implicit conversion // between an int32 and a native int. else if (typeInfo::AreEquivalent(*pDest, typeInfo::nativeInt()) && pSrc->IsType(TI_INT)) { return true; } else if (typeInfo::AreEquivalent(*pSrc, typeInfo::nativeInt()) && pDest->IsType(TI_INT)) { *pDest = *pSrc; *changed = true; return true; } #endif // TARGET_64BIT FAIL: *pDest = typeInfo(); return false; } #ifdef DEBUG #if VERBOSE_VERIFY // Utility method to have a detailed dump of a TypeInfo object void typeInfo::Dump() const { char flagsStr[8]; flagsStr[0] = ((m_flags & TI_FLAG_UNINIT_OBJREF) != 0) ? 'U' : '-'; flagsStr[1] = ((m_flags & TI_FLAG_BYREF) != 0) ? 'B' : '-'; flagsStr[2] = ((m_flags & TI_FLAG_BYREF_READONLY) != 0) ? 'R' : '-'; flagsStr[3] = ((m_flags & TI_FLAG_NATIVE_INT) != 0) ? 'N' : '-'; flagsStr[4] = ((m_flags & TI_FLAG_THIS_PTR) != 0) ? 'T' : '-'; flagsStr[5] = ((m_flags & TI_FLAG_BYREF_PERMANENT_HOME) != 0) ? 'P' : '-'; flagsStr[6] = ((m_flags & TI_FLAG_GENERIC_TYPE_VAR) != 0) ? 'G' : '-'; flagsStr[7] = '\0'; printf("[%s(%X) {%s}]", tiType2Str(m_bits.type), m_cls, flagsStr); } #endif // VERBOSE_VERIFY #endif // DEBUG
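tiCompatibleWithByRef above rejects a readonly byref wherever a writable byref is expected. A distilled truth-table sketch of just that flag rule, with the pointed-to types held equal (names hypothetical):

#include <cassert>

static bool ByRefFlagsCompatible(bool childReadonly, bool parentReadonly)
{
    return !(childReadonly && !parentReadonly);
}

int main()
{
    assert(ByRefFlagsCompatible(false, false)); // writable -> writable: ok
    assert(ByRefFlagsCompatible(false, true));  // writable -> readonly: ok
    assert(ByRefFlagsCompatible(true,  true));  // readonly -> readonly: ok
    assert(!ByRefFlagsCompatible(true, false)); // readonly -> writable: rejected
    return 0;
}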
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "_typeinfo.h" bool Compiler::tiCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const { return typeInfo::tiCompatibleWith(info.compCompHnd, child, parent, normalisedForStack); } bool Compiler::tiMergeCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const { return typeInfo::tiMergeCompatibleWith(info.compCompHnd, child, parent, normalisedForStack); } bool Compiler::tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const { return typeInfo::tiMergeToCommonParent(info.compCompHnd, pDest, pSrc, changed); } static bool tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent) { assert(parent.IsByRef()); if (!child.IsByRef()) { return false; } if (child.IsReadonlyByRef() && !parent.IsReadonlyByRef()) { return false; } // Byrefs are compatible if the underlying types are equivalent typeInfo childTarget = ::DereferenceByRef(child); typeInfo parentTarget = ::DereferenceByRef(parent); if (typeInfo::AreEquivalent(childTarget, parentTarget)) { return true; } // Make sure that both types have a valid m_cls if ((childTarget.IsType(TI_REF) || childTarget.IsType(TI_STRUCT)) && (parentTarget.IsType(TI_REF) || parentTarget.IsType(TI_STRUCT))) { return CompHnd->areTypesEquivalent(childTarget.GetClassHandle(), parentTarget.GetClassHandle()); } return false; } /***************************************************************************** * Verify child is compatible with the template parent. Basically, that * child is a "subclass" of parent -it can be substituted for parent * anywhere. Note that if parent contains fancy flags, such as "uninitialized" * , "is this ptr", or "has byref local/field" info, then child must also * contain those flags, otherwise FALSE will be returned ! * * Rules for determining compatibility: * * If parent is a primitive type or value class, then child must be the * same primitive type or value class. The exception is that the built in * value classes System/Boolean etc. are treated as synonyms for * TI_BYTE etc. * * If parent is a byref of a primitive type or value class, then child * must be a byref of the same (rules same as above case). * * Byrefs are compatible only with byrefs. * * If parent is an object, child must be a subclass of it, implement it * (if it is an interface), or be null. * * If parent is an array, child must be the same or subclassed array. * * If parent is a null objref, only null is compatible with it. * * If the "uninitialized", "by ref local/field", "this pointer" or other flags * are different, the items are incompatible. * * parent CANNOT be an undefined (dead) item. 
* */ bool typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent, bool normalisedForStack) { assert(child.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(child), child)); assert(parent.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(parent), parent)); if (typeInfo::AreEquivalent(child, parent)) { return true; } if (parent.IsUnboxedGenericTypeVar() || child.IsUnboxedGenericTypeVar()) { return false; // need to have had child == parent } else if (parent.IsType(TI_REF)) { // An uninitialized objRef is not compatible to initialized. if (child.IsUninitialisedObjRef() && !parent.IsUninitialisedObjRef()) { return false; } if (child.IsNullObjRef()) { // NULL can be any reference type return true; } if (!child.IsType(TI_REF)) { return false; } return CompHnd->canCast(child.m_cls, parent.m_cls); } else if (parent.IsType(TI_METHOD)) { if (!child.IsType(TI_METHOD)) { return false; } // Right now we don't bother merging method handles return false; } else if (parent.IsType(TI_STRUCT)) { if (!child.IsType(TI_STRUCT)) { return false; } // Structures are compatible if they are equivalent return CompHnd->areTypesEquivalent(child.m_cls, parent.m_cls); } else if (parent.IsByRef()) { return tiCompatibleWithByRef(CompHnd, child, parent); } #ifdef TARGET_64BIT // On 64-bit targets we have precise representation for native int, so these rules // represent the fact that the ECMA spec permits the implicit conversion // between an int32 and a native int. else if (parent.IsType(TI_INT) && typeInfo::AreEquivalent(nativeInt(), child)) { return true; } else if (typeInfo::AreEquivalent(nativeInt(), parent) && child.IsType(TI_INT)) { return true; } #endif // TARGET_64BIT return false; } bool typeInfo::tiMergeCompatibleWith(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent, bool normalisedForStack) { if (!child.IsPermanentHomeByRef() && parent.IsPermanentHomeByRef()) { return false; } return typeInfo::tiCompatibleWith(CompHnd, child, parent, normalisedForStack); } /***************************************************************************** * Merge pDest and pSrc to find some commonality (e.g. a common parent). * Copy the result to pDest, marking it dead if no commonality can be found. * * null ^ null -> null * Object ^ null -> Object * [I4 ^ null -> [I4 * InputStream ^ OutputStream -> Stream * InputStream ^ NULL -> InputStream * [I4 ^ Object -> Object * [I4 ^ [Object -> Array * [I4 ^ [R8 -> Array * [Foo ^ I4 -> DEAD * [Foo ^ [I1 -> Array * [InputStream ^ [OutputStream -> Array * DEAD ^ X -> DEAD * [Intfc ^ [OutputStream -> Array * Intf ^ [OutputStream -> Object * [[InStream ^ [[OutStream -> Array * [[InStream ^ [OutStream -> Array * [[Foo ^ [Object -> Array * * Importantly: * [I1 ^ [U1 -> either [I1 or [U1 * etc. * * Also, System/Int32 and I4 merge -> I4, etc. * * Returns FALSE if the merge was completely incompatible (i.e. the item became * dead). * */ bool typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed) { assert(pSrc->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pSrc), *pSrc)); assert(pDest->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pDest), *pDest)); // Merge the auxiliary information like "this" pointer tracking, etc... // Remember the pre-state, so we can tell if it changed. 
*changed = false; DWORD destFlagsBefore = pDest->m_flags; // This bit is only set if both pDest and pSrc have it set pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_THIS_PTR); // This bit is set if either pDest or pSrc have it set pDest->m_flags |= (pSrc->m_flags & TI_FLAG_UNINIT_OBJREF); // This bit is set if either pDest or pSrc have it set pDest->m_flags |= (pSrc->m_flags & TI_FLAG_BYREF_READONLY); // If the byref wasn't permanent home in both sides, then merge won't have the bit set pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_BYREF_PERMANENT_HOME); if (pDest->m_flags != destFlagsBefore) { *changed = true; } // OK the main event. Merge the main types if (typeInfo::AreEquivalent(*pDest, *pSrc)) { return true; } if (pDest->IsUnboxedGenericTypeVar() || pSrc->IsUnboxedGenericTypeVar()) { // Should have had *pDest == *pSrc goto FAIL; } if (pDest->IsType(TI_REF)) { if (pSrc->IsType(TI_NULL)) { // NULL can be any reference type return true; } if (!pSrc->IsType(TI_REF)) { goto FAIL; } // Ask the EE to find the common parent, This always succeeds since System.Object always works CORINFO_CLASS_HANDLE pDestClsBefore = pDest->m_cls; pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle()); if (pDestClsBefore != pDest->m_cls) { *changed = true; } return true; } else if (pDest->IsType(TI_NULL)) { if (pSrc->IsType(TI_REF)) // NULL can be any reference type { *pDest = *pSrc; *changed = true; return true; } goto FAIL; } else if (pDest->IsType(TI_STRUCT)) { if (pSrc->IsType(TI_STRUCT) && CompHnd->areTypesEquivalent(pDest->GetClassHandle(), pSrc->GetClassHandle())) { return true; } goto FAIL; } else if (pDest->IsByRef()) { return tiCompatibleWithByRef(CompHnd, *pSrc, *pDest); } #ifdef TARGET_64BIT // On 64-bit targets we have precise representation for native int, so these rules // represent the fact that the ECMA spec permits the implicit conversion // between an int32 and a native int. else if (typeInfo::AreEquivalent(*pDest, typeInfo::nativeInt()) && pSrc->IsType(TI_INT)) { return true; } else if (typeInfo::AreEquivalent(*pSrc, typeInfo::nativeInt()) && pDest->IsType(TI_INT)) { *pDest = *pSrc; *changed = true; return true; } #endif // TARGET_64BIT FAIL: *pDest = typeInfo(); return false; } #ifdef DEBUG #if VERBOSE_VERIFY // Utility method to have a detailed dump of a TypeInfo object void typeInfo::Dump() const { char flagsStr[8]; flagsStr[0] = ((m_flags & TI_FLAG_UNINIT_OBJREF) != 0) ? 'U' : '-'; flagsStr[1] = ((m_flags & TI_FLAG_BYREF) != 0) ? 'B' : '-'; flagsStr[2] = ((m_flags & TI_FLAG_BYREF_READONLY) != 0) ? 'R' : '-'; flagsStr[3] = ((m_flags & TI_FLAG_NATIVE_INT) != 0) ? 'N' : '-'; flagsStr[4] = ((m_flags & TI_FLAG_THIS_PTR) != 0) ? 'T' : '-'; flagsStr[5] = ((m_flags & TI_FLAG_BYREF_PERMANENT_HOME) != 0) ? 'P' : '-'; flagsStr[6] = ((m_flags & TI_FLAG_GENERIC_TYPE_VAR) != 0) ? 'G' : '-'; flagsStr[7] = '\0'; printf("[%s(%X) {%s}]", tiType2Str(m_bits.type), m_cls, flagsStr); } #endif // VERBOSE_VERIFY #endif // DEBUG
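The flag merging at the top of tiMergeToCommonParent combines an AND-merge (TI_FLAG_THIS_PTR survives only if both sides have it) with OR-merges (TI_FLAG_UNINIT_OBJREF and TI_FLAG_BYREF_READONLY survive if either side has them). A minimal sketch with stand-in flag values (the real TI_FLAG_* constants live in _typeinfo.h):

#include <cassert>
#include <cstdint>

constexpr uint32_t FLAG_THIS_PTR      = 0x1; // kept only if set on BOTH sides
constexpr uint32_t FLAG_UNINIT_OBJREF = 0x2; // kept if set on EITHER side

int main()
{
    uint32_t dest = FLAG_THIS_PTR;
    uint32_t src  = FLAG_UNINIT_OBJREF;

    dest &= (src | ~FLAG_THIS_PTR);     // AND-merge: src lacks THIS_PTR, so it is dropped
    dest |= (src & FLAG_UNINIT_OBJREF); // OR-merge: src has UNINIT_OBJREF, so it is added

    assert(dest == FLAG_UNINIT_OBJREF);
    return 0;
}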
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/mono/mono/mini/wasm_m2n_invoke.g.h
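The generated thunks that follow encode the native signature in the function name: the first character after wasm_invoke_ is the return type and the rest are the parameters (v = void, i = int, l = gint64, f = float, d = double). Note from wasm_invoke_iiliiii below that a gint64 occupies two 32-bit iargs slots (the next int reads iargs [3]), while floats and doubles are taken from fargs instead. A hypothetical sketch of that scheme:

#include <string>

// Hypothetical: build the thunk name for a signature, e.g. ThunkName('i', "il")
// would select wasm_invoke_iil for int(int, gint64).
static std::string ThunkName(char ret, const std::string& params)
{
    return std::string("wasm_invoke_") + ret + params;
}

// Hypothetical: how many 32-bit iargs slots a parameter consumes;
// 'f' and 'd' consume none because they are read from fargs.
static int IArgSlots(char p)
{
    if (p == 'l') return 2;
    if (p == 'f' || p == 'd') return 0;
    return 1; // 'i'
}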
/* * GENERATED FILE, DON'T EDIT * Generated by wasm-tuner.exe --gen-interp-to-native */ static void wasm_invoke_v (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(void); T func = (T)target_func; func (); } static void wasm_invoke_vi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0); T func = (T)target_func; func ((int)(gssize)margs->iargs [0]); } static void wasm_invoke_vii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]); } static void wasm_invoke_viii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); } static void wasm_invoke_viiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); } static void wasm_invoke_viiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); } static void wasm_invoke_viiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); } static void wasm_invoke_viiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); } static void wasm_invoke_viiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]); } static void wasm_invoke_viiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8]); } static void wasm_invoke_viiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int 
arg_9); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9]); } static void wasm_invoke_viiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10]); } static void wasm_invoke_viiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11]); } static void wasm_invoke_viiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12]); } static void wasm_invoke_viiiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12, int arg_13); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12], (int)(gssize)margs->iargs [13]); } static void wasm_invoke_viiiiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12, int arg_13, int arg_14); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], 
(int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12], (int)(gssize)margs->iargs [13], (int)(gssize)margs->iargs [14]); } static void wasm_invoke_i (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(void); T func = (T)target_func; int res = func (); *(int*)margs->retval = res; } static void wasm_invoke_ii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0]); *(int*)margs->retval = res; } static void wasm_invoke_iii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_iiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int 
arg_8); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12]); *(int*)margs->retval = res; } static void wasm_invoke_iiliiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1, int arg_2, int arg_3, int arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_iiil (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, 
int arg_1, gint64 arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2)); *(int*)margs->retval = res; } static void wasm_invoke_if (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_id (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(double arg_0); T func = (T)target_func; int res = func (margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_iif (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_iifi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_iiff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); *(int*)margs->retval = res; } static void wasm_invoke_iffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, int arg_2, int arg_3); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_iifii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iiffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, int arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_iifff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); *(int*)margs->retval = res; } static void wasm_invoke_iifffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_iiffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, int arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX 
(0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iifiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiffffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_iiffffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iiif (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_iiifi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, int arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iiifii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, int arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiifiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, int arg_3, int arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(int*)margs->retval = res; } static void wasm_invoke_iiiif (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiifi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], 
(int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiiifii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(int*)margs->retval = res; } static void wasm_invoke_iiiifiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]); *(int*)margs->retval = res; } static void wasm_invoke_iiifffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]); *(int*)margs->retval = res; } static void wasm_invoke_iiffffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiif (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, float arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], *(float*)&margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiff (void 
*target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, float arg_6, float arg_7); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); *(int*)margs->retval = res; } static void wasm_invoke_iiffffffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7, float arg_8); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffffffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7, float arg_8, float arg_9); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiifii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, float arg_5, int arg_6, int arg_7); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffffffffiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7, float arg_8, float arg_9, int arg_10, int arg_11, int arg_12); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiffffiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, float arg_5, float arg_6, float arg_7, int arg_8, int arg_9, int arg_10, int arg_11); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], 
*(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]); *(int*)margs->retval = res; } static void wasm_invoke_iffffffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, int arg_6); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], (int)(gssize)margs->iargs [0]); *(int*)margs->retval = res; } static void wasm_invoke_iiffiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, int arg_3, int arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_ili (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(gint64 arg_0, int arg_1); T func = (T)target_func; int res = func (get_long_arg (margs, 0), (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iilli (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1, gint64 arg_2, int arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3), (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_l (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(void); T func = (T)target_func; gint64 res = func (); *(gint64*)margs->retval = res; } static void wasm_invoke_ll (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(gint64 arg_0); T func = (T)target_func; gint64 res = func (get_long_arg (margs, 0)); *(gint64*)margs->retval = res; } static void wasm_invoke_li (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0]); *(gint64*)margs->retval = res; } static void wasm_invoke_lil (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, gint64 arg_1); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1)); *(gint64*)margs->retval = res; } static void wasm_invoke_lili (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, gint64 arg_1, int arg_2); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3]); *(gint64*)margs->retval = res; } static void wasm_invoke_lilii (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, gint64 arg_1, int arg_2, int arg_3); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(gint64*)margs->retval = res; } static void wasm_invoke_dd (void *target_func, InterpMethodArguments *margs) { typedef double (*T)(double arg_0); T func = (T)target_func; double res = func (margs->fargs [FIDX (0)]); *(double*)margs->retval = res; } static void wasm_invoke_ddi (void *target_func, InterpMethodArguments *margs) { typedef 
double (*T)(double arg_0, int arg_1); T func = (T)target_func; double res = func (margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]); *(double*)margs->retval = res; } static void wasm_invoke_ddd (void *target_func, InterpMethodArguments *margs) { typedef double (*T)(double arg_0, double arg_1); T func = (T)target_func; double res = func (margs->fargs [FIDX (0)], margs->fargs [FIDX (1)]); *(double*)margs->retval = res; } static void wasm_invoke_dddd (void *target_func, InterpMethodArguments *margs) { typedef double (*T)(double arg_0, double arg_1, double arg_2); T func = (T)target_func; double res = func (margs->fargs [FIDX (0)], margs->fargs [FIDX (1)], margs->fargs [FIDX (2)]); *(double*)margs->retval = res; } static void wasm_invoke_vf (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)]); } static void wasm_invoke_vff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); } static void wasm_invoke_vfff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1, float arg_2); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); } static void wasm_invoke_vffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]); } static void wasm_invoke_vfffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]); } static void wasm_invoke_vffffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]); } static void wasm_invoke_vfffffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)]); } static void wasm_invoke_vffffffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)]); } static void 
wasm_invoke_vfi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(float arg_0, int arg_1); T func = (T)target_func; func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]); } static void wasm_invoke_vif (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)]); } static void wasm_invoke_viff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); } static void wasm_invoke_viffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]); } static void wasm_invoke_vifffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]); } static void wasm_invoke_viffffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]); } static void wasm_invoke_vifffffi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, int arg_6); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], (int)(gssize)margs->iargs [1]); } static void wasm_invoke_viiffi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2]); } static void wasm_invoke_viif (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)]); } static void wasm_invoke_viifff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); } static void wasm_invoke_viifi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2, int 
arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2]); } static void wasm_invoke_ff (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(float arg_0); T func = (T)target_func; float res = func (*(float*)&margs->fargs [FIDX (0)]); *(float*)margs->retval = res; } static void wasm_invoke_ffi (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(float arg_0, int arg_1); T func = (T)target_func; float res = func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]); *(float*)margs->retval = res; } static void wasm_invoke_fff (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(float arg_0, float arg_1); T func = (T)target_func; float res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); *(float*)margs->retval = res; } static void wasm_invoke_ffff (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(float arg_0, float arg_1, float arg_2); T func = (T)target_func; float res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); *(float*)margs->retval = res; } static void wasm_invoke_di (void *target_func, InterpMethodArguments *margs) { typedef double (*T)(int arg_0); T func = (T)target_func; double res = func ((int)(gssize)margs->iargs [0]); *(double*)margs->retval = res; } static void wasm_invoke_fi (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0]); *(float*)margs->retval = res; } static void wasm_invoke_iil (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1)); *(int*)margs->retval = res; } static void wasm_invoke_iili (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1, int arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiliiil (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1, int arg_2, int arg_3, int arg_4, gint64 arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], get_long_arg (margs, 6)); *(int*)margs->retval = res; } static void wasm_invoke_iillli (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1, gint64 arg_2, gint64 arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3), get_long_arg (margs, 5), (int)(gssize)margs->iargs [7]); *(int*)margs->retval = res; } static void wasm_invoke_idiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(double arg_0, int arg_1, int arg_2, int arg_3); T func = (T)target_func; int res = func (margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_lii (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, int arg_1); T func = (T)target_func; gint64 res = func 
((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]); *(gint64*)margs->retval = res; } static void wasm_invoke_vid (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, double arg_1); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], margs->fargs [FIDX (0)]); } static void wasm_invoke_villi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, gint64 arg_1, gint64 arg_2, int arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3), (int)(gssize)margs->iargs [5]); } static void wasm_invoke_did (void *target_func, InterpMethodArguments *margs) { typedef double (*T)(int arg_0, double arg_1); T func = (T)target_func; double res = func ((int)(gssize)margs->iargs [0], margs->fargs [FIDX (0)]); *(double*)margs->retval = res; } static void wasm_invoke_didd (void *target_func, InterpMethodArguments *margs) { typedef double (*T)(int arg_0, double arg_1, double arg_2); T func = (T)target_func; double res = func ((int)(gssize)margs->iargs [0], margs->fargs [FIDX (0)], margs->fargs [FIDX (1)]); *(double*)margs->retval = res; } static void wasm_invoke_fif (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0, float arg_1); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)]); *(float*)margs->retval = res; } static void wasm_invoke_fiff (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0, float arg_1, float arg_2); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); *(float*)margs->retval = res; } static void wasm_invoke_lill (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, gint64 arg_1, gint64 arg_2); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3)); *(gint64*)margs->retval = res; } static void wasm_invoke_vl (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(gint64 arg_0); T func = (T)target_func; func (get_long_arg (margs, 0)); } static void wasm_invoke_vil (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, gint64 arg_1); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1)); } static void wasm_invoke_viil (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, gint64 arg_2); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2)); } static void wasm_invoke_fifff (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0, float arg_1, float arg_2, float arg_3); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); *(float*)margs->retval = res; } static void wasm_invoke_fii (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0, int arg_1); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]); *(float*)margs->retval = res; } static void wasm_invoke_fiii (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0, int arg_1, int arg_2); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], 
(int)(gssize)margs->iargs [2]); *(float*)margs->retval = res; } static void wasm_invoke_fiiiiii (void *target_func, InterpMethodArguments *margs) { typedef float (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5); T func = (T)target_func; float res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); *(float*)margs->retval = res; } static void wasm_invoke_iffffiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, int arg_2); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0]); *(int*)margs->retval = res; } static void wasm_invoke_iffif (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, int arg_2, float arg_3); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (2)]); *(int*)margs->retval = res; } static void wasm_invoke_iffifi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, int arg_2, float arg_3, int arg_4); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1]); *(int*)margs->retval = res; } static void wasm_invoke_ifi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, int arg_1); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]); *(int*)margs->retval = res; } static void wasm_invoke_ifiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, int arg_1, int arg_2, int arg_3); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iififiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_iififiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9); T func = (T)target_func; int res = func 
((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]); *(int*)margs->retval = res; } static void wasm_invoke_iifiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_iifiiiiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_iiifffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffifffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, float arg_5, float arg_6, float arg_7, int arg_8, int arg_9); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffiffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, float arg_5, float arg_6, int arg_7, int arg_8); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, int arg_5); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(int*)margs->retval = res; } static void wasm_invoke_iiiffiiiii (void *target_func, InterpMethodArguments 
*margs) { typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiif (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiifii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, float arg_5, float arg_6, int arg_7); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiiiiffi (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, float arg_6, float arg_7, int arg_8); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_vifff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); } static void wasm_invoke_viffffi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, int arg_5); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [1]); } static void wasm_invoke_vifffi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, int arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1]); } static void 
wasm_invoke_vifffiiff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, int arg_4, int arg_5, float arg_6, float arg_7); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]); } static void wasm_invoke_viffi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, float arg_2, int arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1]); } static void wasm_invoke_vifi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, float arg_1, int arg_2); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1]); } static void wasm_invoke_viiff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); } static void wasm_invoke_viiffff (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]); } static void wasm_invoke_viiffii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, int arg_5); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); } static void wasm_invoke_viiif (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)]); } static void wasm_invoke_viiiffii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, float arg_4, int arg_5, int arg_6); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); } static void wasm_invoke_viiiffiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, float arg_4, int arg_5, int arg_6, int arg_7); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); } static void wasm_invoke_viiifi (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, 
int arg_1, int arg_2, float arg_3, int arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3]); } static void wasm_invoke_viiifii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]); } static void wasm_invoke_viiifiii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); } static void wasm_invoke_viiiif (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)]); } static void wasm_invoke_iffffiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_iffiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, int arg_2, int arg_3, int arg_4); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]); *(int*)margs->retval = res; } static void wasm_invoke_viiiiffii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, float arg_5, int arg_6, int arg_7); T func = (T)target_func; func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); } static void wasm_invoke_iiiliiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, gint64 arg_2, int arg_3, int arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2), (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]); *(int*)margs->retval = res; } static void wasm_invoke_iiilli (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, gint64 arg_2, gint64 arg_3, int arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 
2), get_long_arg (margs, 4), (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_il (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(gint64 arg_0); T func = (T)target_func; int res = func (get_long_arg (margs, 0)); *(int*)margs->retval = res; } static void wasm_invoke_iff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]); *(int*)margs->retval = res; } static void wasm_invoke_ifff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, float arg_2); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]); *(int*)margs->retval = res; } static void wasm_invoke_iffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3); T func = (T)target_func; int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]); *(int*)margs->retval = res; } static void wasm_invoke_vlii (void *target_func, InterpMethodArguments *margs) { typedef void (*T)(gint64 arg_0, int arg_1, int arg_2); T func = (T)target_func; func (get_long_arg (margs, 0), (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); } static void wasm_invoke_iiiil (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, gint64 arg_3); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], get_long_arg (margs, 3)); *(int*)margs->retval = res; } static void wasm_invoke_liiil (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, int arg_1, int arg_2, gint64 arg_3); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], get_long_arg (margs, 3)); *(gint64*)margs->retval = res; } static void wasm_invoke_iill (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, gint64 arg_1, gint64 arg_2); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3)); *(int*)margs->retval = res; } static void wasm_invoke_iiffff (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiidii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, double arg_4, int arg_5, int arg_6); T func = (T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]); *(int*)margs->retval = res; } static void wasm_invoke_iiiiifiii (void *target_func, InterpMethodArguments *margs) { typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, int arg_5, int arg_6, int arg_7); T func = 
(T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_liiii (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, int arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(gint64*)margs->retval = res;
}

/* Signature strings in strcmp-sorted order; entry N pairs with interp_to_native_invokes [N]. */
static const char* interp_to_native_signatures [] = {
	"DD", "DDD", "DDDD", "DDI", "DI", "DID", "DIDD",
	"FF", "FFF", "FFFF", "FFI", "FI", "FIF", "FIFF", "FIFFF", "FII", "FIII", "FIIIIII",
	"I", "ID", "IDIII", "IF", "IFF", "IFFF", "IFFFF", "IFFFFFFI", "IFFFFIII", "IFFFFIIII",
	"IFFI", "IFFIF", "IFFIFI", "IFFII", "IFFIII", "IFI", "IFIII",
	"II", "IIF", "IIFF", "IIFFF", "IIFFFF", "IIFFFFFF", "IIFFFFFFFF", "IIFFFFI", "IIFFFFII",
	"IIFFFI", "IIFFI", "IIFFII", "IIFFIII", "IIFI", "IIFIFIIIII", "IIFIFIIIIII", "IIFII",
	"IIFIII", "IIFIIIII", "IIFIIIIII",
	"III", "IIIF", "IIIFFFF", "IIIFFFFF", "IIIFFFFFF", "IIIFFFFFFFF", "IIIFFFFFFFFIII",
	"IIIFFFII", "IIIFFIFFFII", "IIIFFIFFII", "IIIFFII", "IIIFFIIIII", "IIIFI", "IIIFII", "IIIFIII",
	"IIII", "IIIIF", "IIIIFI", "IIIIFII", "IIIIFIII", "IIIII", "IIIIIDII", "IIIIIF",
	"IIIIIFFFFIIII", "IIIIIFII", "IIIIIFIII", "IIIIII", "IIIIIIFFI", "IIIIIIFII", "IIIIIII",
	"IIIIIIIF", "IIIIIIIFF", "IIIIIIIFFI", "IIIIIIII", "IIIIIIIII", "IIIIIIIIII", "IIIIIIIIIII",
	"IIIIIIIIIIII", "IIIIIIIIIIIII", "IIIIIIIIIIIIII",
	"IIIIL", "IIIL", "IIILIIII", "IIILLI", "IIL", "IILI", "IILIIII", "IILIIIL", "IILL", "IILLI",
	"IILLLI", "IL", "ILI",
	"L", "LI", "LII", "LIIII", "LIIIL", "LIL", "LILI", "LILII", "LILL", "LL",
	"V", "VF", "VFF", "VFFF", "VFFFF", "VFFFFF", "VFFFFFF", "VFFFFFFF", "VFFFFFFFF", "VFI",
	"VI", "VID", "VIF", "VIFF", "VIFFF", "VIFFFF", "VIFFFFF", "VIFFFFFF", "VIFFFFFI", "VIFFFFI",
	"VIFFFI", "VIFFFIIFF", "VIFFI", "VIFI",
	"VII", "VIIF", "VIIFF", "VIIFFF", "VIIFFFF", "VIIFFI", "VIIFFII", "VIIFI",
	"VIII", "VIIIF", "VIIIFFII", "VIIIFFIII", "VIIIFI", "VIIIFII", "VIIIFIII",
	"VIIII", "VIIIIF", "VIIIIFFII", "VIIIII", "VIIIIII", "VIIIIIII", "VIIIIIIII", "VIIIIIIIII",
	"VIIIIIIIIII", "VIIIIIIIIIII", "VIIIIIIIIIIII", "VIIIIIIIIIIIII", "VIIIIIIIIIIIIII",
	"VIIIIIIIIIIIIIII", "VIIL", "VIL", "VILLI", "VL", "VLII",
};

/* Invoke thunks, in the same order as the signatures above. */
static void* interp_to_native_invokes [] = {
	wasm_invoke_dd, wasm_invoke_ddd, wasm_invoke_dddd, wasm_invoke_ddi, wasm_invoke_di,
	wasm_invoke_did, wasm_invoke_didd,
	wasm_invoke_ff, wasm_invoke_fff, wasm_invoke_ffff, wasm_invoke_ffi, wasm_invoke_fi,
	wasm_invoke_fif, wasm_invoke_fiff, wasm_invoke_fifff, wasm_invoke_fii, wasm_invoke_fiii,
	wasm_invoke_fiiiiii,
	wasm_invoke_i, wasm_invoke_id, wasm_invoke_idiii, wasm_invoke_if, wasm_invoke_iff,
	wasm_invoke_ifff, wasm_invoke_iffff, wasm_invoke_iffffffi, wasm_invoke_iffffiii,
	wasm_invoke_iffffiiii, wasm_invoke_iffi, wasm_invoke_iffif, wasm_invoke_iffifi,
	wasm_invoke_iffii, wasm_invoke_iffiii, wasm_invoke_ifi, wasm_invoke_ifiii,
	wasm_invoke_ii, wasm_invoke_iif, wasm_invoke_iiff, wasm_invoke_iifff, wasm_invoke_iiffff,
	wasm_invoke_iiffffff, wasm_invoke_iiffffffff, wasm_invoke_iiffffi, wasm_invoke_iiffffii,
	wasm_invoke_iifffi, wasm_invoke_iiffi, wasm_invoke_iiffii, wasm_invoke_iiffiii,
	wasm_invoke_iifi, wasm_invoke_iififiiiii, wasm_invoke_iififiiiiii, wasm_invoke_iifii,
	wasm_invoke_iifiii, wasm_invoke_iifiiiii, wasm_invoke_iifiiiiii,
	wasm_invoke_iii, wasm_invoke_iiif, wasm_invoke_iiiffff, wasm_invoke_iiifffff,
	wasm_invoke_iiiffffff, wasm_invoke_iiiffffffff, wasm_invoke_iiiffffffffiii,
	wasm_invoke_iiifffii, wasm_invoke_iiiffifffii, wasm_invoke_iiiffiffii, wasm_invoke_iiiffii,
	wasm_invoke_iiiffiiiii, wasm_invoke_iiifi, wasm_invoke_iiifii, wasm_invoke_iiifiii,
	wasm_invoke_iiii, wasm_invoke_iiiif, wasm_invoke_iiiifi, wasm_invoke_iiiifii,
	wasm_invoke_iiiifiii, wasm_invoke_iiiii, wasm_invoke_iiiiidii, wasm_invoke_iiiiif,
	wasm_invoke_iiiiiffffiiii, wasm_invoke_iiiiifii, wasm_invoke_iiiiifiii, wasm_invoke_iiiiii,
	wasm_invoke_iiiiiiffi, wasm_invoke_iiiiiifii, wasm_invoke_iiiiiii, wasm_invoke_iiiiiiif,
	wasm_invoke_iiiiiiiff, wasm_invoke_iiiiiiiffi, wasm_invoke_iiiiiiii, wasm_invoke_iiiiiiiii,
	wasm_invoke_iiiiiiiiii, wasm_invoke_iiiiiiiiiii, wasm_invoke_iiiiiiiiiiii,
	wasm_invoke_iiiiiiiiiiiii, wasm_invoke_iiiiiiiiiiiiii,
	wasm_invoke_iiiil, wasm_invoke_iiil, wasm_invoke_iiiliiii, wasm_invoke_iiilli,
	wasm_invoke_iil, wasm_invoke_iili, wasm_invoke_iiliiii, wasm_invoke_iiliiil,
	wasm_invoke_iill, wasm_invoke_iilli, wasm_invoke_iillli, wasm_invoke_il, wasm_invoke_ili,
	wasm_invoke_l, wasm_invoke_li, wasm_invoke_lii, wasm_invoke_liiii, wasm_invoke_liiil,
	wasm_invoke_lil, wasm_invoke_lili, wasm_invoke_lilii, wasm_invoke_lill, wasm_invoke_ll,
	wasm_invoke_v, wasm_invoke_vf, wasm_invoke_vff, wasm_invoke_vfff, wasm_invoke_vffff,
	wasm_invoke_vfffff, wasm_invoke_vffffff, wasm_invoke_vfffffff, wasm_invoke_vffffffff,
	wasm_invoke_vfi,
	wasm_invoke_vi, wasm_invoke_vid, wasm_invoke_vif, wasm_invoke_viff, wasm_invoke_vifff,
	wasm_invoke_viffff, wasm_invoke_vifffff, wasm_invoke_viffffff, wasm_invoke_vifffffi,
	wasm_invoke_viffffi, wasm_invoke_vifffi, wasm_invoke_vifffiiff, wasm_invoke_viffi,
	wasm_invoke_vifi,
	wasm_invoke_vii, wasm_invoke_viif, wasm_invoke_viiff, wasm_invoke_viifff,
	wasm_invoke_viiffff, wasm_invoke_viiffi, wasm_invoke_viiffii, wasm_invoke_viifi,
	wasm_invoke_viii, wasm_invoke_viiif, wasm_invoke_viiiffii, wasm_invoke_viiiffiii,
	wasm_invoke_viiifi, wasm_invoke_viiifii, wasm_invoke_viiifiii,
	wasm_invoke_viiii, wasm_invoke_viiiif, wasm_invoke_viiiiffii, wasm_invoke_viiiii,
	wasm_invoke_viiiiii, wasm_invoke_viiiiiii, wasm_invoke_viiiiiiii, wasm_invoke_viiiiiiiii,
	wasm_invoke_viiiiiiiiii, wasm_invoke_viiiiiiiiiii, wasm_invoke_viiiiiiiiiiii,
	wasm_invoke_viiiiiiiiiiiii, wasm_invoke_viiiiiiiiiiiiii, wasm_invoke_viiiiiiiiiiiiiii,
	wasm_invoke_viil, wasm_invoke_vil, wasm_invoke_villi, wasm_invoke_vl, wasm_invoke_vlii,
};
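/*
 * The two tables above are emitted in lock-step: interp_to_native_signatures
 * holds the signature strings in strcmp-sorted order, and
 * interp_to_native_invokes holds the thunk for the same signature at the same
 * index. Reading the thunks, the letters encode V = void, I = int32 (or
 * pointer), L = gint64, F = float, D = double, with the leading letter being
 * the return type.
 *
 * The helper below is an illustrative sketch only -- it is not produced by
 * wasm-tuner and is not an existing Mono API. It shows how a caller could
 * resolve a signature string to its thunk, assuming <string.h> and <stdlib.h>
 * are available for strcmp/bsearch; the names sig_compare and lookup_invoke
 * are hypothetical.
 */
static int
sig_compare (const void *key, const void *elem)
{
	/* key is the signature string itself; elem points at an array slot. */
	return strcmp ((const char *)key, *(const char *const *)elem);
}

static void*
lookup_invoke (const char *sig)
{
	const char **match = (const char **)bsearch (sig, interp_to_native_signatures,
		sizeof (interp_to_native_signatures) / sizeof (interp_to_native_signatures [0]),
		sizeof (interp_to_native_signatures [0]), sig_compare);
	/* The arrays are parallel, so the matching signature's index selects the thunk. */
	return match ? interp_to_native_invokes [match - interp_to_native_signatures] : NULL;
}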
/*
 * GENERATED FILE, DON'T EDIT
 * Generated by wasm-tuner.exe --gen-interp-to-native
 */

static void
wasm_invoke_v (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(void);
	T func = (T)target_func;
	func ();
}

static void
wasm_invoke_vi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0]);
}

static void
wasm_invoke_vii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]);
}

static void
wasm_invoke_viii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
}

static void
wasm_invoke_viiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
}

static void
wasm_invoke_viiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
}

static void
wasm_invoke_viiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
}

static void
wasm_invoke_viiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
}

static void
wasm_invoke_viiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]);
}

static void
wasm_invoke_viiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8]);
}

static void
wasm_invoke_viiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9]);
}

static void
wasm_invoke_viiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10]);
}

static void
wasm_invoke_viiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11]);
}

static void
wasm_invoke_viiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12]);
}

static void
wasm_invoke_viiiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12, int arg_13);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12], (int)(gssize)margs->iargs [13]);
}

static void
wasm_invoke_viiiiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12, int arg_13, int arg_14);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12], (int)(gssize)margs->iargs [13], (int)(gssize)margs->iargs [14]);
}
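/*
 * Illustrative only -- not part of the generated file. A hypothetical call
 * site showing the assumed calling protocol: the interpreter packs integer
 * arguments into margs->iargs, float/double arguments into margs->fargs,
 * points retval at the result slot, then dispatches through the thunk
 * matching the callee's signature. The exact InterpMethodArguments field
 * layout (gpointer *iargs, double *fargs, gpointer retval) is an assumption
 * taken from how the thunks below read it.
 */
static int
call_native_iii_example (void *native_func, int a, int b)
{
	InterpMethodArguments margs = { 0 };
	gpointer iargs [2];
	int retval = 0;

	iargs [0] = (gpointer)(gssize)a;
	iargs [1] = (gpointer)(gssize)b;
	margs.iargs = iargs;
	margs.retval = (gpointer)&retval;

	wasm_invoke_iii (native_func, &margs); /* "III": int (int, int) */
	return retval;
}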
static void
wasm_invoke_i (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(void);
	T func = (T)target_func;
	int res = func ();
	*(int*)margs->retval = res;
}

static void
wasm_invoke_ii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiiiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9, int arg_10, int arg_11, int arg_12);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7], (int)(gssize)margs->iargs [8], (int)(gssize)margs->iargs [9], (int)(gssize)margs->iargs [10], (int)(gssize)margs->iargs [11], (int)(gssize)margs->iargs [12]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiliiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1, int arg_2, int arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiil (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, gint64 arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2));
	*(int*)margs->retval = res;
}
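/*
 * Illustrative only -- get_long_arg is defined elsewhere in the runtime.
 * This sketch just shows the assumed contract: a gint64 ('L') argument
 * occupies two consecutive 32-bit iargs slots, which is why the slot index
 * advances by 2 after every gint64 argument in the thunks above and below.
 * The low/high slot order here is an assumption.
 */
static gint64
get_long_arg_sketch (InterpMethodArguments *margs, int idx)
{
	gint64 lo = (guint32)(gssize)margs->iargs [idx];
	gint64 hi = (gint64)(gssize)margs->iargs [idx + 1];
	return lo | (hi << 32);
}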
static void
wasm_invoke_if (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_id (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(double arg_0);
	T func = (T)target_func;
	int res = func (margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iif (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, int arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, int arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiif (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiifi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, int arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiifii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, int arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiifiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, int arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiif (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiifi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiifii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiifiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiifffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiif (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, float arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], *(float*)&margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, float arg_6, float arg_7);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
	*(int*)margs->retval = res;
}
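/*
 * Illustrative only -- not part of the generated file. The float-argument
 * pattern used above: fargs is assumed to be an array of doubles, and a
 * float ('F') argument is stored in the raw bits of its slot, so the thunks
 * reinterpret the slot's address instead of converting the double value.
 * FIDX comes from the surrounding runtime headers; an identity mapping on
 * this target is assumed. The helper name is hypothetical.
 */
static float
read_float_slot_sketch (InterpMethodArguments *margs, int fidx)
{
	return *(float*)&margs->fargs [FIDX (fidx)];
}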
static void
wasm_invoke_iiffffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7, float arg_8);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7, float arg_8, float arg_9);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiifii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, float arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffffffffiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7, float arg_8, float arg_9, int arg_10, int arg_11, int arg_12);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiffffiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, float arg_5, float arg_6, float arg_7, int arg_8, int arg_9, int arg_10, int arg_11);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffffffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, int arg_6);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], (int)(gssize)margs->iargs [0]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, int arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_ili (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(gint64 arg_0, int arg_1);
	T func = (T)target_func;
	int res = func (get_long_arg (margs, 0), (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iilli (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1, gint64 arg_2, int arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3), (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_l (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(void);
	T func = (T)target_func;
	gint64 res = func ();
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_ll (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(gint64 arg_0);
	T func = (T)target_func;
	gint64 res = func (get_long_arg (margs, 0));
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_li (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0]);
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_lil (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, gint64 arg_1);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1));
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_lili (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, gint64 arg_1, int arg_2);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3]);
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_lilii (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, gint64 arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_dd (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(double arg_0);
	T func = (T)target_func;
	double res = func (margs->fargs [FIDX (0)]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_ddi (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(double arg_0, int arg_1);
	T func = (T)target_func;
	double res = func (margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_ddd (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(double arg_0, double arg_1);
	T func = (T)target_func;
	double res = func (margs->fargs [FIDX (0)], margs->fargs [FIDX (1)]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_dddd (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(double arg_0, double arg_1, double arg_2);
	T func = (T)target_func;
	double res = func (margs->fargs [FIDX (0)], margs->fargs [FIDX (1)], margs->fargs [FIDX (2)]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_vf (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)]);
}

static void
wasm_invoke_vff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
}

static void
wasm_invoke_vfff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1, float arg_2);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
}

static void
wasm_invoke_vffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]);
}

static void
wasm_invoke_vfffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]);
}

static void
wasm_invoke_vffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]);
}

static void
wasm_invoke_vfffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)]);
}

static void
wasm_invoke_vffffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6, float arg_7);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)], *(float*)&margs->fargs [FIDX (6)], *(float*)&margs->fargs [FIDX (7)]);
}
static void
wasm_invoke_vfi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(float arg_0, int arg_1);
	T func = (T)target_func;
	func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]);
}

static void
wasm_invoke_vif (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)]);
}

static void
wasm_invoke_viff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
}

static void
wasm_invoke_viffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]);
}

static void
wasm_invoke_vifffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]);
}

static void
wasm_invoke_viffffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, float arg_6);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], *(float*)&margs->fargs [FIDX (5)]);
}

static void
wasm_invoke_vifffffi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, float arg_5, int arg_6);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], (int)(gssize)margs->iargs [1]);
}

static void
wasm_invoke_viiffi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2]);
}

static void
wasm_invoke_viif (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)]);
}

static void
wasm_invoke_viifff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
}

static void
wasm_invoke_viifi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2, int arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [2]);
}

static void
wasm_invoke_ff (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(float arg_0);
	T func = (T)target_func;
	float res = func (*(float*)&margs->fargs [FIDX (0)]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_ffi (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(float arg_0, int arg_1);
	T func = (T)target_func;
	float res = func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_fff (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(float arg_0, float arg_1);
	T func = (T)target_func;
	float res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_ffff (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(float arg_0, float arg_1, float arg_2);
	T func = (T)target_func;
	float res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_di (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(int arg_0);
	T func = (T)target_func;
	double res = func ((int)(gssize)margs->iargs [0]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_fi (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_iil (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1));
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iili (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1, int arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiliiil (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1, int arg_2, int arg_3, int arg_4, gint64 arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], get_long_arg (margs, 6));
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iillli (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1, gint64 arg_2, gint64 arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3), get_long_arg (margs, 5), (int)(gssize)margs->iargs [7]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_idiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(double arg_0, int arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	int res = func (margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_lii (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, int arg_1);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]);
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_vid (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, double arg_1);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], margs->fargs [FIDX (0)]);
}

static void
wasm_invoke_villi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, gint64 arg_1, gint64 arg_2, int arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3), (int)(gssize)margs->iargs [5]);
}

static void
wasm_invoke_did (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(int arg_0, double arg_1);
	T func = (T)target_func;
	double res = func ((int)(gssize)margs->iargs [0], margs->fargs [FIDX (0)]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_didd (void *target_func, InterpMethodArguments *margs)
{
	typedef double (*T)(int arg_0, double arg_1, double arg_2);
	T func = (T)target_func;
	double res = func ((int)(gssize)margs->iargs [0], margs->fargs [FIDX (0)], margs->fargs [FIDX (1)]);
	*(double*)margs->retval = res;
}

static void
wasm_invoke_fif (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0, float arg_1);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_fiff (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0, float arg_1, float arg_2);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_lill (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, gint64 arg_1, gint64 arg_2);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3));
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_vl (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(gint64 arg_0);
	T func = (T)target_func;
	func (get_long_arg (margs, 0));
}

static void
wasm_invoke_vil (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, gint64 arg_1);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1));
}

static void
wasm_invoke_viil (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, gint64 arg_2);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2));
}

static void
wasm_invoke_fifff (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0, float arg_1, float arg_2, float arg_3);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_fii (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0, int arg_1);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_fiii (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0, int arg_1, int arg_2);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(float*)margs->retval = res;
}
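/*
 * Illustrative only -- not part of the generated file. The signature-string
 * encoding used by the thunk names and the tables above, as inferred from
 * the thunks themselves: the first letter is the return type, the rest are
 * parameters -- 'V' void, 'I' 32-bit int/pointer, 'L' 64-bit int (two iargs
 * slots), 'F' float, 'D' double. The helper name is hypothetical.
 */
static const char *
sig_char_to_ctype_sketch (char c)
{
	switch (c) {
	case 'V': return "void";
	case 'I': return "int";
	case 'L': return "gint64";
	case 'F': return "float";
	case 'D': return "double";
	default:  return "?";
	}
}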
static void
wasm_invoke_fiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef float (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	float res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
	*(float*)margs->retval = res;
}

static void
wasm_invoke_iffffiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, int arg_2);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffif (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, int arg_2, float arg_3);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (2)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffifi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, int arg_2, float arg_3, int arg_4);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_ifi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, int arg_1);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_ifiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, int arg_1, int arg_2, int arg_3);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iififiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iififiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8, int arg_9);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iifiiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, int arg_2, int arg_3, int arg_4, int arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiifffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffifffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, float arg_5, float arg_6, float arg_7, int arg_8, int arg_9);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffiffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, float arg_5, float arg_6, int arg_7, int arg_8);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiffiiiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, int arg_5, int arg_6, int arg_7, int arg_8);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiif (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiifii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, float arg_5, float arg_6, int arg_7);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiiiiffi (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, int arg_4, int arg_5, float arg_6, float arg_7, int arg_8);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_vifff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
}

static void
wasm_invoke_viffffi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4, int arg_5);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [1]);
}

static void
wasm_invoke_vifffi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, int arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1]);
}
static void
wasm_invoke_vifffiiff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, float arg_3, int arg_4, int arg_5, float arg_6, float arg_7);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (3)], *(float*)&margs->fargs [FIDX (4)]);
}

static void
wasm_invoke_viffi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, float arg_2, int arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [1]);
}

static void
wasm_invoke_vifi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, float arg_1, int arg_2);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [1]);
}

static void
wasm_invoke_viiff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
}

static void
wasm_invoke_viiffff (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, float arg_4, float arg_5);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]);
}

static void
wasm_invoke_viiffii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, float arg_2, float arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
}

static void
wasm_invoke_viiif (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)]);
}

static void
wasm_invoke_viiiffii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, float arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
}

static void
wasm_invoke_viiiffiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, float arg_4, int arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
}

static void
wasm_invoke_viiifi (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3]);
}

static void
wasm_invoke_viiifii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4]);
}

static void
wasm_invoke_viiifiii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, float arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [3], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
}

static void
wasm_invoke_viiiif (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)]);
}

static void
wasm_invoke_iffffiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, int arg_2, int arg_3, int arg_4);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_viiiiffii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, float arg_5, int arg_6, int arg_7);
	T func = (T)target_func;
	func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
}

static void
wasm_invoke_iiiliiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, gint64 arg_2, int arg_3, int arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2), (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6], (int)(gssize)margs->iargs [7]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiilli (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, gint64 arg_2, gint64 arg_3, int arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], get_long_arg (margs, 2), get_long_arg (margs, 4), (int)(gssize)margs->iargs [6]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_il (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(gint64 arg_0);
	T func = (T)target_func;
	int res = func (get_long_arg (margs, 0));
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_ifff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, float arg_2);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(float arg_0, float arg_1, float arg_2, float arg_3);
	T func = (T)target_func;
	int res = func (*(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_vlii (void *target_func, InterpMethodArguments *margs)
{
	typedef void (*T)(gint64 arg_0, int arg_1, int arg_2);
	T func = (T)target_func;
	func (get_long_arg (margs, 0), (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]);
}

static void
wasm_invoke_iiiil (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, gint64 arg_3);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], get_long_arg (margs, 3));
	*(int*)margs->retval = res;
}

static void
wasm_invoke_liiil (void *target_func, InterpMethodArguments *margs)
{
	typedef gint64 (*T)(int arg_0, int arg_1, int arg_2, gint64 arg_3);
	T func = (T)target_func;
	gint64 res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], get_long_arg (margs, 3));
	*(gint64*)margs->retval = res;
}

static void
wasm_invoke_iill (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, gint64 arg_1, gint64 arg_2);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], get_long_arg (margs, 1), get_long_arg (margs, 3));
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiffff (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, float arg_1, float arg_2, float arg_3, float arg_4);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], *(float*)&margs->fargs [FIDX (0)], *(float*)&margs->fargs [FIDX (1)], *(float*)&margs->fargs [FIDX (2)], *(float*)&margs->fargs [FIDX (3)]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiidii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, double arg_4, int arg_5, int arg_6);
	T func = (T)target_func;
	int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5]);
	*(int*)margs->retval = res;
}

static void
wasm_invoke_iiiiifiii (void *target_func, InterpMethodArguments *margs)
{
	typedef int (*T)(int arg_0, int arg_1, int arg_2, int arg_3, float arg_4, int arg_5, int arg_6, int arg_7);
	T func = 
(T)target_func; int res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3], *(float*)&margs->fargs [FIDX (0)], (int)(gssize)margs->iargs [4], (int)(gssize)margs->iargs [5], (int)(gssize)margs->iargs [6]); *(int*)margs->retval = res; } static void wasm_invoke_liiii (void *target_func, InterpMethodArguments *margs) { typedef gint64 (*T)(int arg_0, int arg_1, int arg_2, int arg_3); T func = (T)target_func; gint64 res = func ((int)(gssize)margs->iargs [0], (int)(gssize)margs->iargs [1], (int)(gssize)margs->iargs [2], (int)(gssize)margs->iargs [3]); *(gint64*)margs->retval = res; } static const char* interp_to_native_signatures [] = { "DD", "DDD", "DDDD", "DDI", "DI", "DID", "DIDD", "FF", "FFF", "FFFF", "FFI", "FI", "FIF", "FIFF", "FIFFF", "FII", "FIII", "FIIIIII", "I", "ID", "IDIII", "IF", "IFF", "IFFF", "IFFFF", "IFFFFFFI", "IFFFFIII", "IFFFFIIII", "IFFI", "IFFIF", "IFFIFI", "IFFII", "IFFIII", "IFI", "IFIII", "II", "IIF", "IIFF", "IIFFF", "IIFFFF", "IIFFFFFF", "IIFFFFFFFF", "IIFFFFI", "IIFFFFII", "IIFFFI", "IIFFI", "IIFFII", "IIFFIII", "IIFI", "IIFIFIIIII", "IIFIFIIIIII", "IIFII", "IIFIII", "IIFIIIII", "IIFIIIIII", "III", "IIIF", "IIIFFFF", "IIIFFFFF", "IIIFFFFFF", "IIIFFFFFFFF", "IIIFFFFFFFFIII", "IIIFFFII", "IIIFFIFFFII", "IIIFFIFFII", "IIIFFII", "IIIFFIIIII", "IIIFI", "IIIFII", "IIIFIII", "IIII", "IIIIF", "IIIIFI", "IIIIFII", "IIIIFIII", "IIIII", "IIIIIDII", "IIIIIF", "IIIIIFFFFIIII", "IIIIIFII", "IIIIIFIII", "IIIIII", "IIIIIIFFI", "IIIIIIFII", "IIIIIII", "IIIIIIIF", "IIIIIIIFF", "IIIIIIIFFI", "IIIIIIII", "IIIIIIIII", "IIIIIIIIII", "IIIIIIIIIII", "IIIIIIIIIIII", "IIIIIIIIIIIII", "IIIIIIIIIIIIII", "IIIIL", "IIIL", "IIILIIII", "IIILLI", "IIL", "IILI", "IILIIII", "IILIIIL", "IILL", "IILLI", "IILLLI", "IL", "ILI", "L", "LI", "LII", "LIIII", "LIIIL", "LIL", "LILI", "LILII", "LILL", "LL", "V", "VF", "VFF", "VFFF", "VFFFF", "VFFFFF", "VFFFFFF", "VFFFFFFF", "VFFFFFFFF", "VFI", "VI", "VID", "VIF", "VIFF", "VIFFF", "VIFFFF", "VIFFFFF", "VIFFFFFF", "VIFFFFFI", "VIFFFFI", "VIFFFI", "VIFFFIIFF", "VIFFI", "VIFI", "VII", "VIIF", "VIIFF", "VIIFFF", "VIIFFFF", "VIIFFI", "VIIFFII", "VIIFI", "VIII", "VIIIF", "VIIIFFII", "VIIIFFIII", "VIIIFI", "VIIIFII", "VIIIFIII", "VIIII", "VIIIIF", "VIIIIFFII", "VIIIII", "VIIIIII", "VIIIIIII", "VIIIIIIII", "VIIIIIIIII", "VIIIIIIIIII", "VIIIIIIIIIII", "VIIIIIIIIIIII", "VIIIIIIIIIIIII", "VIIIIIIIIIIIIII", "VIIIIIIIIIIIIIII", "VIIL", "VIL", "VILLI", "VL", "VLII", }; static void* interp_to_native_invokes [] = { wasm_invoke_dd, wasm_invoke_ddd, wasm_invoke_dddd, wasm_invoke_ddi, wasm_invoke_di, wasm_invoke_did, wasm_invoke_didd, wasm_invoke_ff, wasm_invoke_fff, wasm_invoke_ffff, wasm_invoke_ffi, wasm_invoke_fi, wasm_invoke_fif, wasm_invoke_fiff, wasm_invoke_fifff, wasm_invoke_fii, wasm_invoke_fiii, wasm_invoke_fiiiiii, wasm_invoke_i, wasm_invoke_id, wasm_invoke_idiii, wasm_invoke_if, wasm_invoke_iff, wasm_invoke_ifff, wasm_invoke_iffff, wasm_invoke_iffffffi, wasm_invoke_iffffiii, wasm_invoke_iffffiiii, wasm_invoke_iffi, wasm_invoke_iffif, wasm_invoke_iffifi, wasm_invoke_iffii, wasm_invoke_iffiii, wasm_invoke_ifi, wasm_invoke_ifiii, wasm_invoke_ii, wasm_invoke_iif, wasm_invoke_iiff, wasm_invoke_iifff, wasm_invoke_iiffff, wasm_invoke_iiffffff, wasm_invoke_iiffffffff, wasm_invoke_iiffffi, wasm_invoke_iiffffii, wasm_invoke_iifffi, wasm_invoke_iiffi, wasm_invoke_iiffii, wasm_invoke_iiffiii, wasm_invoke_iifi, wasm_invoke_iififiiiii, wasm_invoke_iififiiiiii, wasm_invoke_iifii, wasm_invoke_iifiii, 
wasm_invoke_iifiiiii, wasm_invoke_iifiiiiii, wasm_invoke_iii, wasm_invoke_iiif, wasm_invoke_iiiffff, wasm_invoke_iiifffff, wasm_invoke_iiiffffff, wasm_invoke_iiiffffffff, wasm_invoke_iiiffffffffiii, wasm_invoke_iiifffii, wasm_invoke_iiiffifffii, wasm_invoke_iiiffiffii, wasm_invoke_iiiffii, wasm_invoke_iiiffiiiii, wasm_invoke_iiifi, wasm_invoke_iiifii, wasm_invoke_iiifiii, wasm_invoke_iiii, wasm_invoke_iiiif, wasm_invoke_iiiifi, wasm_invoke_iiiifii, wasm_invoke_iiiifiii, wasm_invoke_iiiii, wasm_invoke_iiiiidii, wasm_invoke_iiiiif, wasm_invoke_iiiiiffffiiii, wasm_invoke_iiiiifii, wasm_invoke_iiiiifiii, wasm_invoke_iiiiii, wasm_invoke_iiiiiiffi, wasm_invoke_iiiiiifii, wasm_invoke_iiiiiii, wasm_invoke_iiiiiiif, wasm_invoke_iiiiiiiff, wasm_invoke_iiiiiiiffi, wasm_invoke_iiiiiiii, wasm_invoke_iiiiiiiii, wasm_invoke_iiiiiiiiii, wasm_invoke_iiiiiiiiiii, wasm_invoke_iiiiiiiiiiii, wasm_invoke_iiiiiiiiiiiii, wasm_invoke_iiiiiiiiiiiiii, wasm_invoke_iiiil, wasm_invoke_iiil, wasm_invoke_iiiliiii, wasm_invoke_iiilli, wasm_invoke_iil, wasm_invoke_iili, wasm_invoke_iiliiii, wasm_invoke_iiliiil, wasm_invoke_iill, wasm_invoke_iilli, wasm_invoke_iillli, wasm_invoke_il, wasm_invoke_ili, wasm_invoke_l, wasm_invoke_li, wasm_invoke_lii, wasm_invoke_liiii, wasm_invoke_liiil, wasm_invoke_lil, wasm_invoke_lili, wasm_invoke_lilii, wasm_invoke_lill, wasm_invoke_ll, wasm_invoke_v, wasm_invoke_vf, wasm_invoke_vff, wasm_invoke_vfff, wasm_invoke_vffff, wasm_invoke_vfffff, wasm_invoke_vffffff, wasm_invoke_vfffffff, wasm_invoke_vffffffff, wasm_invoke_vfi, wasm_invoke_vi, wasm_invoke_vid, wasm_invoke_vif, wasm_invoke_viff, wasm_invoke_vifff, wasm_invoke_viffff, wasm_invoke_vifffff, wasm_invoke_viffffff, wasm_invoke_vifffffi, wasm_invoke_viffffi, wasm_invoke_vifffi, wasm_invoke_vifffiiff, wasm_invoke_viffi, wasm_invoke_vifi, wasm_invoke_vii, wasm_invoke_viif, wasm_invoke_viiff, wasm_invoke_viifff, wasm_invoke_viiffff, wasm_invoke_viiffi, wasm_invoke_viiffii, wasm_invoke_viifi, wasm_invoke_viii, wasm_invoke_viiif, wasm_invoke_viiiffii, wasm_invoke_viiiffiii, wasm_invoke_viiifi, wasm_invoke_viiifii, wasm_invoke_viiifiii, wasm_invoke_viiii, wasm_invoke_viiiif, wasm_invoke_viiiiffii, wasm_invoke_viiiii, wasm_invoke_viiiiii, wasm_invoke_viiiiiii, wasm_invoke_viiiiiiii, wasm_invoke_viiiiiiiii, wasm_invoke_viiiiiiiiii, wasm_invoke_viiiiiiiiiii, wasm_invoke_viiiiiiiiiiii, wasm_invoke_viiiiiiiiiiiii, wasm_invoke_viiiiiiiiiiiiii, wasm_invoke_viiiiiiiiiiiiiii, wasm_invoke_viil, wasm_invoke_vil, wasm_invoke_villi, wasm_invoke_vl, wasm_invoke_vlii, };
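The two tables that close the file above are parallel arrays: interp_to_native_signatures is kept sorted so that a dispatcher can binary-search a signature string such as "VIFF" and use the resulting index into interp_to_native_invokes to pick the matching thunk. Note also, from thunks such as wasm_invoke_vlii and wasm_invoke_iill, that a 64-bit gint64 argument occupies two consecutive slots in margs->iargs, which is why get_long_arg (margs, 0) is followed by iargs [2]. Below is a minimal, self-contained sketch of such a signature lookup; lookup_invoke and the miniature demo tables are hypothetical stand-ins for illustration, not the runtime's actual dispatcher.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Miniature stand-ins for the generated tables: sorted signature strings
 * paired index-for-index with the names of their invoke thunks. */
static const char *demo_signatures[] = { "II", "VIFF", "VLII" };
static const char *demo_thunks[]     = { "wasm_invoke_ii", "wasm_invoke_viff", "wasm_invoke_vlii" };
static int sig_cmp (const void *key, const void *elem)
{
	return strcmp ((const char *)key, *(const char *const *)elem);
}
/* Binary-search the sorted signature table; the hit's index selects the thunk. */
static const char* lookup_invoke (const char *sig)
{
	const char **hit = (const char **)bsearch (sig, demo_signatures,
		sizeof (demo_signatures) / sizeof (demo_signatures [0]),
		sizeof (demo_signatures [0]), sig_cmp);
	return hit ? demo_thunks [hit - demo_signatures] : NULL;
}
int main (void)
{
	const char *thunk = lookup_invoke ("VIFF");
	printf ("%s\n", thunk ? thunk : "no trampoline generated for signature");
	return 0;
}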
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/tests/palsuite/threading/DuplicateHandle/test7/test7.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test7.c (DuplicateHandle) ** ** Purpose: Tests the PAL implementation of the DuplicateHandle function, ** with a handle from CreateThread. The test will create a thread ** handle and its duplicate. Then get the priorities of the threads, ** set the priority of one and the change should be seen in the ** other. ** ** **===================================================================*/ #include <palsuite.h> DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam); PALTEST(threading_DuplicateHandle_test7_paltest_duplicatehandle_test7, "threading/DuplicateHandle/test7/paltest_duplicatehandle_test7") { HANDLE hThread; HANDLE hDupThread; DWORD dwThreadId = 0; LPTHREAD_START_ROUTINE lpStartAddress = &CreateTestThread_DuplicateHandle_test7; HANDLE hSyncEvent; int threadPriority; int duplicatePriority; int finalPriority; /* Initialize the PAL.*/ if ((PAL_Initialize(argc, argv)) != 0) { return (FAIL); } LPSECURITY_ATTRIBUTES lpEventAttributes = NULL; BOOL bManualReset = TRUE; BOOL bInitialState = FALSE; hSyncEvent = CreateEvent(lpEventAttributes, bManualReset, bInitialState, NULL); if (hSyncEvent == NULL) { Fail("ERROR:%u: Unable to create sync event.\n", GetLastError()); } /* Create a thread.*/ hThread = CreateThread(NULL, /* SD*/ (DWORD)0, /* initial stack size*/ lpStartAddress, /* thread function*/ (VOID*)hSyncEvent,/* thread argument*/ (DWORD)0, /* creation option*/ &dwThreadId); /* thread identifier*/ if (hThread == NULL) { Fail("ERROR:%u: Unable to create thread.\n", GetLastError()); } /* Duplicate the thread handle.*/ if (!(DuplicateHandle(GetCurrentProcess(), /* source handle process*/ hThread, /* handle to duplicate*/ GetCurrentProcess(), /* target process handle*/ &hDupThread, /* duplicate handle*/ (DWORD)0, /* requested access*/ FALSE, /* handle inheritance*/ DUPLICATE_SAME_ACCESS))) /* optional actions*/ { Trace("ERROR: %ld :Failed to create the duplicate handle" " to hThread=0x%lx", GetLastError(), hThread); CloseHandle(hThread); Fail(""); } /* Get the priority of the thread.*/ threadPriority = GetThreadPriority(hThread); if(threadPriority != 0) { Trace("ERROR: Thread priority of hThread=0x%lx should be " "set to normal THREAD_PRIORITY_NORMAL=%d\n", hThread, THREAD_PRIORITY_NORMAL); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Get the priority of the duplicated handle, and compare it to * the priority of the original thread. Should be the same.*/ duplicatePriority = GetThreadPriority(hDupThread); if(duplicatePriority != threadPriority) { Trace("ERROR: Expected priority of hThread=0x%lx and hDupThread=0x%lx" " to be the same. Priorities:hThread=\"%d\":hDupThread=\"%d\"\n", hThread, hDupThread, threadPriority, duplicatePriority); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Set the priority of the duplicate thread.*/ if(!SetThreadPriority (hDupThread,THREAD_PRIORITY_HIGHEST)) { Trace("ERROR:%u: SetThreadPriority failed on hDupThread=0x%lx\n", GetLastError(), hDupThread); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Get the priority of the original thread, and * compare it to what the duplicate was set to.*/ finalPriority = GetThreadPriority(hThread); if (finalPriority != THREAD_PRIORITY_HIGHEST) { Trace("ERROR: Expected priority of hThread=0x%lx and " "hDupThread=0x%lx to be set the same. 
Priorities:" "hThread=\"%d\":hDupThread=\"%d\".\n", hThread, hDupThread, threadPriority, duplicatePriority); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Signal the helper thread that it can shut down */ if (!SetEvent(hSyncEvent)) { Fail("ERROR:%u: Failed to set event.\n", GetLastError()); } /* Wait on the original thread.*/ if((WaitForSingleObject(hThread, 100)) != WAIT_OBJECT_0) { Trace("ERROR:%u: hThread=0x%lx is in a non-signalled " "mode, yet created signalled.\n", GetLastError(), hThread); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Clean-up thread and Terminate the PAL.*/ CloseHandle(hSyncEvent); CloseHandle(hThread); CloseHandle(hDupThread); PAL_Terminate(); return PASS; } /*Thread testing function*/ DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam) { HANDLE hSyncEvent = (HANDLE)lpParam; /* Wait until the main thread signals that this helper thread should shut down */ WaitForSingleObject(hSyncEvent, INFINITE); return (DWORD)0; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test7.c (DuplicateHandle) ** ** Purpose: Tests the PAL implementation of the DuplicateHandle function, ** with a handle from CreateThread. The test will create a thread ** handle and its duplicate. Then get the priorities of the threads, ** set the priority of one and the change should be seen in the ** other. ** ** **===================================================================*/ #include <palsuite.h> DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam); PALTEST(threading_DuplicateHandle_test7_paltest_duplicatehandle_test7, "threading/DuplicateHandle/test7/paltest_duplicatehandle_test7") { HANDLE hThread; HANDLE hDupThread; DWORD dwThreadId = 0; LPTHREAD_START_ROUTINE lpStartAddress = &CreateTestThread_DuplicateHandle_test7; HANDLE hSyncEvent; int threadPriority; int duplicatePriority; int finalPriority; /* Initialize the PAL.*/ if ((PAL_Initialize(argc, argv)) != 0) { return (FAIL); } LPSECURITY_ATTRIBUTES lpEventAttributes = NULL; BOOL bManualReset = TRUE; BOOL bInitialState = FALSE; hSyncEvent = CreateEvent(lpEventAttributes, bManualReset, bInitialState, NULL); if (hSyncEvent == NULL) { Fail("ERROR:%u: Unable to create sync event.\n", GetLastError()); } /* Create a thread.*/ hThread = CreateThread(NULL, /* SD*/ (DWORD)0, /* initial stack size*/ lpStartAddress, /* thread function*/ (VOID*)hSyncEvent,/* thread argument*/ (DWORD)0, /* creation option*/ &dwThreadId); /* thread identifier*/ if (hThread == NULL) { Fail("ERROR:%u: Unable to create thread.\n", GetLastError()); } /* Duplicate the thread handle.*/ if (!(DuplicateHandle(GetCurrentProcess(), /* source handle process*/ hThread, /* handle to duplicate*/ GetCurrentProcess(), /* target process handle*/ &hDupThread, /* duplicate handle*/ (DWORD)0, /* requested access*/ FALSE, /* handle inheritance*/ DUPLICATE_SAME_ACCESS))) /* optional actions*/ { Trace("ERROR: %ld :Failed to create the duplicate handle" " to hThread=0x%lx", GetLastError(), hThread); CloseHandle(hThread); Fail(""); } /* Get the priority of the thread.*/ threadPriority = GetThreadPriority(hThread); if(threadPriority != 0) { Trace("ERROR: Thread priority of hThread=0x%lx should be " "set to normal THREAD_PRIORITY_NORMAL=%d\n", hThread, THREAD_PRIORITY_NORMAL); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Get the priority of the duplicated handle, and compare it to * the priority of the original thread. Should be the same.*/ duplicatePriority = GetThreadPriority(hDupThread); if(duplicatePriority != threadPriority) { Trace("ERROR: Expected priority of hThread=0x%lx and hDupThread=0x%lx" " to be the same. Priorities:hThread=\"%d\":hDupThread=\"%d\"\n", hThread, hDupThread, threadPriority, duplicatePriority); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Set the priority of the duplicate thread.*/ if(!SetThreadPriority (hDupThread,THREAD_PRIORITY_HIGHEST)) { Trace("ERROR:%u: SetThreadPriority failed on hDupThread=0x%lx\n", GetLastError(), hDupThread); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Get the priority of the original thread, and * compare it to what the duplicate was set to.*/ finalPriority = GetThreadPriority(hThread); if (finalPriority != THREAD_PRIORITY_HIGHEST) { Trace("ERROR: Expected priority of hThread=0x%lx and " "hDupThread=0x%lx to be set the same. 
Priorities:" "hThread=\"%d\":hDupThread=\"%d\".\n", hThread, hDupThread, threadPriority, duplicatePriority); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Signal the helper thread that it can shut down */ if (!SetEvent(hSyncEvent)) { Fail("ERROR:%u: Failed to set event.\n", GetLastError()); } /* Wait on the original thread.*/ if((WaitForSingleObject(hThread, 100)) != WAIT_OBJECT_0) { Trace("ERROR:%u: hThread=0x%lx is in a non-signalled " "mode, yet created signalled.\n", GetLastError(), hThread); CloseHandle(hThread); CloseHandle(hDupThread); Fail(""); } /* Clean-up thread and Terminate the PAL.*/ CloseHandle(hSyncEvent); CloseHandle(hThread); CloseHandle(hDupThread); PAL_Terminate(); return PASS; } /*Thread testing function*/ DWORD PALAPI CreateTestThread_DuplicateHandle_test7(LPVOID lpParam) { HANDLE hSyncEvent = (HANDLE)lpParam; /* Wait until the main thread signals that this helper thread should shut down */ WaitForSingleObject(hSyncEvent, INFINITE); return (DWORD)0; }
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test5/commonconsts.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: commonconsts.h ** ** **============================================================*/ #ifndef _COMMONCONSTS_H_ #define _COMMONCONSTS_H_ #include <pal.h> const int TIMEOUT = 60 * 5 * 1000; #define szcHelperProcessStartEvName "start" #define szcHelperProcessReadyEvName "ready" #define szcHelperProcessFinishEvName "finish" /* PEDANTIC and PEDANTIC1 are helper macros that grump, with little typing, * about unexpected return codes in a generic way: PEDANTIC complains when * the call returns zero, PEDANTIC1 when it returns non-zero. */ #define PEDANTIC(function, parameters) \ { \ if (! (function parameters) ) \ { \ Trace("%s: NonFatal failure of %s%s for reasons %u and %u\n", \ __FILE__, #function, #parameters, GetLastError(), errno); \ } \ } #define PEDANTIC1(function, parameters) \ { \ if ( (function parameters) ) \ { \ Trace("%s: NonFatal failure of %s%s for reasons %u and %u\n", \ __FILE__, #function, #parameters, GetLastError(), errno); \ } \ } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: commonconsts.h ** ** **============================================================*/ #ifndef _COMMONCONSTS_H_ #define _COMMONCONSTS_H_ #include <pal.h> const int TIMEOUT = 60 * 5 * 1000; #define szcHelperProcessStartEvName "start" #define szcHelperProcessReadyEvName "ready" #define szcHelperProcessFinishEvName "finish" /* PEDANTIC and PEDANTIC1 are helper macros that grump, with little typing, * about unexpected return codes in a generic way: PEDANTIC complains when * the call returns zero, PEDANTIC1 when it returns non-zero. */ #define PEDANTIC(function, parameters) \ { \ if (! (function parameters) ) \ { \ Trace("%s: NonFatal failure of %s%s for reasons %u and %u\n", \ __FILE__, #function, #parameters, GetLastError(), errno); \ } \ } #define PEDANTIC1(function, parameters) \ { \ if ( (function parameters) ) \ { \ Trace("%s: NonFatal failure of %s%s for reasons %u and %u\n", \ __FILE__, #function, #parameters, GetLastError(), errno); \ } \ } #endif
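The macros take the function name and its parenthesized argument list as two separate macro arguments so both can be stringized into the failure message. A standalone approximation of the PEDANTIC pattern, with fprintf standing in for the harness's Trace (DEMO_PEDANTIC and sometimes_fails are hypothetical names used only for illustration):
#include <errno.h>
#include <stdio.h>
/* Standalone stand-in for the palsuite macro: report, but do not fail,
 * when `function parameters` evaluates to zero. */
#define DEMO_PEDANTIC(function, parameters) \
    { \
        if (! (function parameters) ) \
        { \
            fprintf(stderr, "%s: NonFatal failure of %s%s, errno %d\n", \
                    __FILE__, #function, #parameters, errno); \
        } \
    }
static int sometimes_fails(int x) { return x > 0; }
int main(void)
{
    DEMO_PEDANTIC(sometimes_fails, (1));  /* returns non-zero: silent */
    DEMO_PEDANTIC(sometimes_fails, (-1)); /* returns zero: reported */
    return 0;
}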
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/debug/di/rsstackwalk.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // RsStackWalk.cpp // // // This file contains the implementation of the V3 managed stackwalking API. // // ====================================================================================== #include "stdafx.h" #include "primitives.h" //--------------------------------------------------------------------------------------- // // Constructor for CordbStackWalk. // // Arguments: // pCordbThread - the thread on which this stackwalker is created // CordbStackWalk::CordbStackWalk(CordbThread * pCordbThread) : CordbBase(pCordbThread->GetProcess(), 0, enumCordbStackWalk), m_pCordbThread(pCordbThread), m_pSFIHandle(NULL), m_cachedSetContextFlag(SET_CONTEXT_FLAG_ACTIVE_FRAME), m_cachedHR(S_OK), m_fIsOneFrameAhead(false) { m_pCachedFrame.Clear(); } void CordbStackWalk::Init() { CordbProcess * pProcess = GetProcess(); m_lastSyncFlushCounter = pProcess->m_flushCounter; IDacDbiInterface * pDAC = pProcess->GetDAC(); pDAC->CreateStackWalk(m_pCordbThread->m_vmThreadToken, &m_context, &m_pSFIHandle); // see the function header of code:CordbStackWalk::CheckForLegacyHijackCase CheckForLegacyHijackCase(); // Add itself to the neuter list. m_pCordbThread->GetRefreshStackNeuterList()->Add(GetProcess(), this); } // ---------------------------------------------------------------------------- // CordbStackWalk::CheckForLegacyHijackCase // // Description: // @dbgtodo legacy interop debugging - In the case of an unhandled hardware exception, the // thread will be hijacked to code:Debugger::GenericHijackFunc, which the stackwalker doesn't know how to // unwind. We can teach the stackwalker to recognize that hijack stub, but since it's going to be deprecated // anyway, it's not worth the effort. So we check for the hijack CONTEXT here and use it as the CONTEXT. This // check should be removed when we are completely // out-of-process. // void CordbStackWalk::CheckForLegacyHijackCase() { #if defined(FEATURE_INTEROP_DEBUGGING) CordbProcess * pProcess = GetProcess(); // Only do this if we have a shim and we are interop-debugging. if ((pProcess->GetShim() != NULL) && pProcess->IsInteropDebugging()) { // And only if we have a CordbUnmanagedThread and we are hijacked to code:Debugger::GenericHijackFunc CordbUnmanagedThread * pUT = pProcess->GetUnmanagedThread(m_pCordbThread->GetVolatileOSThreadID()); if (pUT != NULL) { if (pUT->IsFirstChanceHijacked() || pUT->IsGenericHijacked()) { // The GetThreadContext function hides the effects of hijacking and returns the unhijacked context m_context.ContextFlags = DT_CONTEXT_FULL; pUT->GetThreadContext(&m_context); IDacDbiInterface * pDAC = GetProcess()->GetDAC(); pDAC->SetStackWalkCurrentContext(m_pCordbThread->m_vmThreadToken, m_pSFIHandle, SET_CONTEXT_FLAG_ACTIVE_FRAME, &m_context); } } } #endif // FEATURE_INTEROP_DEBUGGING } //--------------------------------------------------------------------------------------- // // Destructor for CordbStackWalk. // // Notes: // We don't really need to do anything here since the CordbStackWalk should have been neutered already. // CordbStackWalk::~CordbStackWalk() { _ASSERTE(IsNeutered()); } //--------------------------------------------------------------------------------------- // // This function resets all the state on a CordbStackWalk and releases all the memory. // It is used for neutering and refreshing. 
// void CordbStackWalk::DeleteAll() { _ASSERTE(GetProcess()->GetProcessLock()->HasLock()); // delete allocated memory if (m_pSFIHandle) { HRESULT hr = S_OK; EX_TRY { #if defined(FEATURE_DBGIPC_TRANSPORT_DI) // For Mac debugging, it's not safe to call into the DAC once // code:INativeEventPipeline::TerminateProcess is called. This is because the transport will not // work anymore. The sole purpose of calling DeleteStackWalk() is to release the resources and // memory allocated for the stackwalk. In the remote debugging case, the memory is allocated in // the debuggee process. If the process is already terminated, then it's ok to skip the call. if (!GetProcess()->m_exiting) #endif // FEATURE_DBGIPC_TRANSPORT_DI { // This Delete call shouldn't actually throw. Worst case, the DDImpl leaked memory. GetProcess()->GetDAC()->DeleteStackWalk(m_pSFIHandle); } } EX_CATCH_HRESULT(hr); SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr); m_pSFIHandle = NULL; } // clear out the cached frame m_pCachedFrame.Clear(); m_cachedHR = S_OK; m_fIsOneFrameAhead = false; } //--------------------------------------------------------------------------------------- // // Release all memory used by the stackwalker. // // // Notes: // CordbStackWalk is neutered by CordbThread or CleanupStack(). // void CordbStackWalk::Neuter() { if (IsNeutered()) { return; } DeleteAll(); CordbBase::Neuter(); } // standard QI function HRESULT CordbStackWalk::QueryInterface(REFIID id, void **pInterface) { if (id == IID_ICorDebugStackWalk) { *pInterface = static_cast<ICorDebugStackWalk*>(this); } else if (id == IID_IUnknown) { *pInterface = static_cast<IUnknown*>(static_cast<ICorDebugStackWalk*>(this)); } else { *pInterface = NULL; return E_NOINTERFACE; } ExternalAddRef(); return S_OK; } //--------------------------------------------------------------------------------------- // // Refreshes all the state stored on the CordbStackWalk. This is necessary because sending IPC events to // the LS flushes the DAC cache, and m_pSFIHandle is allocated entirely in DAC memory. So, we keep track // of whether we have sent an IPC event and refresh the CordbStackWalk if necessary. // // Notes: // Throws on error. // void CordbStackWalk::RefreshIfNeeded() { CordbProcess * pProcess = GetProcess(); _ASSERTE(pProcess->GetProcessLock()->HasLock()); // check if we need to refresh if (m_lastSyncFlushCounter != pProcess->m_flushCounter) { // Make a local copy of the CONTEXT here. DeleteAll() will delete the CONTEXT on the cached frame, // and CreateStackWalk() actually uses the CONTEXT buffer we pass to it. DT_CONTEXT ctx; if (m_fIsOneFrameAhead) { ctx = *(m_pCachedFrame->GetContext()); } else { ctx = m_context; } // clear all the state DeleteAll(); // create a new stackwalk handle pProcess->GetDAC()->CreateStackWalk(m_pCordbThread->m_vmThreadToken, &m_context, &m_pSFIHandle); // advance the stackwalker to where we originally were SetContextWorker(m_cachedSetContextFlag, sizeof(DT_CONTEXT), reinterpret_cast<BYTE *>(&ctx)); // update the sync counter m_lastSyncFlushCounter = pProcess->m_flushCounter; } } // CordbStackWalk::RefreshIfNeeded() //--------------------------------------------------------------------------------------- // // Retrieves the CONTEXT of the current frame. 
// // Arguments: // contextFlags - context flags used to determine the required size for the buffer // contextBufSize - size of the CONTEXT buffer // pContextSize - out parameter; returns the size required for the CONTEXT buffer // pbContextBuf - the CONTEXT buffer // // Return Value: // Return S_OK on success. // Return CORDBG_E_PAST_END_OF_STACK if we are already at the end of the stack. // Return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER) if the buffer is too small. // Return E_FAIL on other failures. // HRESULT CordbStackWalk::GetContext(ULONG32 contextFlags, ULONG32 contextBufSize, ULONG32 * pContextSize, BYTE pbContextBuf[]) { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_BEGIN(this) { RefreshIfNeeded(); // set the required size for the CONTEXT buffer if (pContextSize != NULL) { *pContextSize = ContextSizeForFlags(contextFlags); } // If all the user wants to know is the CONTEXT size, then we are done. if ((contextBufSize != 0) && (pbContextBuf != NULL)) { if (contextBufSize < 4) { ThrowWin32(ERROR_INSUFFICIENT_BUFFER); } DT_CONTEXT * pContext = reinterpret_cast<DT_CONTEXT *>(pbContextBuf); // Some helper functions that examine the context expect the flags to be initialized. pContext->ContextFlags = contextFlags; // check the size of the incoming buffer if (!CheckContextSizeForBuffer(contextBufSize, pbContextBuf)) { ThrowWin32(ERROR_INSUFFICIENT_BUFFER); } // Check if we are one frame ahead. If so, return the CONTEXT on the cached frame. if (m_fIsOneFrameAhead) { if (m_pCachedFrame != NULL) { const DT_CONTEXT * pSrcContext = m_pCachedFrame->GetContext(); _ASSERTE(pSrcContext); CORDbgCopyThreadContext(pContext, pSrcContext); } else { // We encountered a problem when we were trying to initialize the CordbNativeFrame. // However, the problem occurred after we had unwound the current frame. // What do we do here? We don't have the CONTEXT anymore. _ASSERTE(FAILED(m_cachedHR)); ThrowHR(m_cachedHR); } } else { // No easy way out in this case. We have to call the DDI. IDacDbiInterface * pDAC = GetProcess()->GetDAC(); IDacDbiInterface::FrameType ft = pDAC->GetStackWalkCurrentFrameInfo(m_pSFIHandle, NULL); if (ft == IDacDbiInterface::kInvalid) { ThrowHR(E_FAIL); } else if (ft == IDacDbiInterface::kAtEndOfStack) { ThrowHR(CORDBG_E_PAST_END_OF_STACK); } else if (ft == IDacDbiInterface::kExplicitFrame) { ThrowHR(CORDBG_E_NO_CONTEXT_FOR_INTERNAL_FRAME); } else { // We always store the current CONTEXT, so just copy it into the buffer. CORDbgCopyThreadContext(pContext, &m_context); } } } } PUBLIC_REENTRANT_API_END(hr); return hr; } //--------------------------------------------------------------------------------------- // // Set the stackwalker to the specified CONTEXT. // // Arguments: // flag - context flags used to determine the size of the CONTEXT // contextSize - the size of the CONTEXT // context - the CONTEXT as a byte array // // Return Value: // Return S_OK on success. // Return E_INVALIDARG if context is NULL // Return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER) if the CONTEXT is too small. // Return E_FAIL on other failures. 
// HRESULT CordbStackWalk::SetContext(CorDebugSetContextFlag flag, ULONG32 contextSize, BYTE context[]) { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_BEGIN(this) { RefreshIfNeeded(); SetContextWorker(flag, contextSize, context); } PUBLIC_REENTRANT_API_END(hr); return hr; } //--------------------------------------------------------------------------------------- // // Refer to the comment for code:CordbStackWalk::SetContext // void CordbStackWalk::SetContextWorker(CorDebugSetContextFlag flag, ULONG32 contextSize, BYTE context[]) { if (context == NULL) { ThrowHR(E_INVALIDARG); } if (!CheckContextSizeForBuffer(contextSize, context)) { ThrowWin32(ERROR_INSUFFICIENT_BUFFER); } // invalidate the cache m_pCachedFrame.Clear(); m_cachedHR = S_OK; m_fIsOneFrameAhead = false; DT_CONTEXT * pSrcContext = reinterpret_cast<DT_CONTEXT *>(context); // Check the incoming CONTEXT using a temporary CONTEXT buffer before updating our real CONTEXT buffer. // The incoming CONTEXT is not required to have all the bits set in its CONTEXT flags, so only update // the registers specified by the CONTEXT flags. Note that CORDbgCopyThreadContext() honours the CONTEXT // flags on both the source and the destination CONTEXTs when it copies them. DT_CONTEXT tmpCtx = m_context; tmpCtx.ContextFlags |= pSrcContext->ContextFlags; CORDbgCopyThreadContext(&tmpCtx, pSrcContext); IDacDbiInterface * pDAC = GetProcess()->GetDAC(); IfFailThrow(pDAC->CheckContext(m_pCordbThread->m_vmThreadToken, &tmpCtx)); // At this point we have done all of our checks to verify that the incoming CONTEXT is sane, so we can // update our internal CONTEXT buffer. m_context = tmpCtx; m_cachedSetContextFlag = flag; pDAC->SetStackWalkCurrentContext(m_pCordbThread->m_vmThreadToken, m_pSFIHandle, flag, &m_context); } //--------------------------------------------------------------------------------------- // // Helper to perform all the necessary operations when we unwind, including: // 1) Unwind // 2) Save the new unwound CONTEXT // // Return Value: // Return TRUE if we successfully unwind to the next frame. // Return FALSE if there are no more frames to walk. // Throw on error. // BOOL CordbStackWalk::UnwindStackFrame() { CordbProcess * pProcess = GetProcess(); _ASSERTE(pProcess->GetProcessLock()->HasLock()); IDacDbiInterface * pDAC = pProcess->GetDAC(); BOOL retVal = pDAC->UnwindStackWalkFrame(m_pSFIHandle); // Now that we have unwound, make sure we update the CONTEXT buffer to reflect the current stack frame. // This call is safe regardless of whether the unwind is successful or not. pDAC->GetStackWalkCurrentContext(m_pSFIHandle, &m_context); return retVal; } // CordbStackWalk::UnwindStackFrame //--------------------------------------------------------------------------------------- // // Unwind the stackwalker to the next frame. // // Return Value: // Return S_OK on success. // Return CORDBG_E_FAIL_TO_UNWIND_FRAME if the unwind fails. // Return CORDBG_S_AT_END_OF_STACK if we have reached the end of the stack as a result of this unwind. // Return CORDBG_E_PAST_END_OF_STACK if we are already at the end of the stack to begin with. // HRESULT CordbStackWalk::Next() { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_BEGIN(this) { RefreshIfNeeded(); if (m_fIsOneFrameAhead) { // We have already unwound to the next frame when we materialized the CordbNativeFrame // for the current frame. So we just need to clear the cache because we are already at // the next frame. 
if (m_pCachedFrame != NULL) { m_pCachedFrame.Clear(); } m_cachedHR = S_OK; m_fIsOneFrameAhead = false; } else { IDacDbiInterface * pDAC = GetProcess()->GetDAC(); IDacDbiInterface::FrameType ft = IDacDbiInterface::kInvalid; ft = pDAC->GetStackWalkCurrentFrameInfo(this->m_pSFIHandle, NULL); if (ft == IDacDbiInterface::kAtEndOfStack) { ThrowHR(CORDBG_E_PAST_END_OF_STACK); } // update the cached flag to indicate that we have reached an unwind CONTEXT m_cachedSetContextFlag = SET_CONTEXT_FLAG_UNWIND_FRAME; if (UnwindStackFrame()) { hr = S_OK; } else { hr = CORDBG_S_AT_END_OF_STACK; } } } PUBLIC_REENTRANT_API_END(hr); return hr; } //--------------------------------------------------------------------------------------- // // Retrieves an ICDFrame corresponding to the current frame: // Stopped At Out Parameter Return Value // ---------- ------------- ------------ // explicit frame CordbInternalFrame S_OK // managed stack frame CordbNativeFrame S_OK // native stack frame NULL S_FALSE // // Arguments: // ppFrame - out parameter; return the ICDFrame // // Return Value: // On success return the HRs above. // Return CORDBG_E_PAST_END_OF_STACK if we are already at the end of the stack. // Return E_INVALIDARG if ppFrame is NULL // Return E_FAIL on other errors. // // Notes: // This is just a wrapper with an EX_TRY/EX_CATCH_HRESULT for GetFrameWorker(). // HRESULT CordbStackWalk::GetFrame(ICorDebugFrame ** ppFrame) { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_NO_LOCK_BEGIN(this) { ATT_REQUIRE_STOPPED_MAY_FAIL_OR_THROW(GetProcess(), ThrowHR); RSLockHolder lockHolder(GetProcess()->GetProcessLock()); RefreshIfNeeded(); hr = GetFrameWorker(ppFrame); } PUBLIC_REENTRANT_API_END(hr); if (FAILED(hr)) { if (m_fIsOneFrameAhead && (m_pCachedFrame == NULL)) { // We encountered a problem when we tried to materialize a CordbNativeFrame. // Cache the failure HR so that we can return it later if the caller // calls GetFrame() again or GetContext(). m_cachedHR = hr; } } return hr; } //--------------------------------------------------------------------------------------- // // Refer to the comment for code:CordbStackWalk::GetFrame // HRESULT CordbStackWalk::GetFrameWorker(ICorDebugFrame ** ppFrame) { _ASSERTE(GetProcess()->GetProcessLock()->HasLock()); if (ppFrame == NULL) { ThrowHR(E_INVALIDARG); } *ppFrame = NULL; RSInitHolder<CordbFrame> pResultFrame(NULL); if (m_fIsOneFrameAhead) { if (m_pCachedFrame != NULL) { pResultFrame.Assign(m_pCachedFrame); pResultFrame.TransferOwnershipExternal(ppFrame); return S_OK; } else { // We encountered a problem when we were trying to initialize the CordbNativeFrame. // However, the problem occurred after we had unwound the current frame. // Whatever error code we return, it should be the same one GetContext() returns. 
_ASSERTE(FAILED(m_cachedHR)); ThrowHR(m_cachedHR); } } IDacDbiInterface * pDAC = NULL; DebuggerIPCE_STRData frameData; ZeroMemory(&frameData, sizeof(frameData)); IDacDbiInterface::FrameType ft = IDacDbiInterface::kInvalid; pDAC = GetProcess()->GetDAC(); ft = pDAC->GetStackWalkCurrentFrameInfo(m_pSFIHandle, &frameData); if (ft == IDacDbiInterface::kInvalid) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - invalid stackwalker (%p)", this); ThrowHR(E_FAIL); } else if (ft == IDacDbiInterface::kAtEndOfStack) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - past end of stack (%p)", this); ThrowHR(CORDBG_E_PAST_END_OF_STACK); } else if (ft == IDacDbiInterface::kNativeStackFrame) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - native stack frame (%p)", this); return S_FALSE; } else if (ft == IDacDbiInterface::kExplicitFrame) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - explicit frame (%p)", this); // We no longer expect to get internal frames by unwinding. GetProcess()->TargetConsistencyCheck(false); } else if (ft == IDacDbiInterface::kManagedStackFrame) { _ASSERTE(frameData.eType == DebuggerIPCE_STRData::cMethodFrame); HRESULT hr = S_OK; // In order to find the FramePointer on x86, we need to unwind to the next frame. // Technically, only x86 needs to do this, because the x86 runtime stackwalker doesn't unwind // one frame ahead of time. However, we are doing this on all platforms to keep things simple. BOOL fSuccess = UnwindStackFrame(); (void)fSuccess; //prevent "unused variable" error from GCC _ASSERTE(fSuccess); m_fIsOneFrameAhead = true; #if defined(TARGET_X86) frameData.fp = pDAC->GetFramePointer(m_pSFIHandle); #endif // TARGET_X86 // currentFuncData contains general information about the method. // It has no information about any particular jitted instance of the method. DebuggerIPCE_FuncData * pFuncData = &(frameData.v.funcData); // currentJITFuncData contains information about the current jitted instance of the method // on the stack. DebuggerIPCE_JITFuncData * pJITFuncData = &(frameData.v.jitFuncData); // Lookup the appdomain that the thread was in when it was executing code for this frame. We pass this // to the frame when we create it so we can properly resolve locals in that frame later. CordbAppDomain * pCurrentAppDomain = GetProcess()->LookupOrCreateAppDomain(frameData.vmCurrentAppDomainToken); _ASSERTE(pCurrentAppDomain != NULL); // Lookup the module CordbModule* pModule = pCurrentAppDomain->LookupOrCreateModule(pFuncData->vmDomainAssembly); PREFIX_ASSUME(pModule != NULL); // Create or look up a CordbNativeCode. There is one for each jitted instance of a method, // and we may have multiple instances because of generics. CordbNativeCode * pNativeCode = pModule->LookupOrCreateNativeCode(pFuncData->funcMetadataToken, pJITFuncData->vmNativeCodeMethodDescToken, pJITFuncData->nativeStartAddressPtr); IfFailThrow(hr); // The native code object will create the function object if needed CordbFunction * pFunction = pNativeCode->GetFunction(); // A CordbFunction is theoretically the uninstantiated method, yet for back-compat we allow // debuggers to assume that it corresponds to exactly 1 native code blob. In order for // an open generic function to know what native code to give back, we attach an arbitrary // native code that we located through code inspection. // Note that not all CordbFunction objects get created via stack traces because you can also // create them by name. 
In that case you still won't get code for Open generic functions // because we will never have attached one and the lookup by token is insufficient. This // behavior mimics our 2.0 debugging behavior though so it's not a regression. pFunction->NotifyCodeCreated(pNativeCode); IfFailThrow(hr); _ASSERTE((pFunction != NULL) && (pNativeCode != NULL)); // initialize the auxiliary info required for funclets CordbMiscFrame miscFrame(pJITFuncData); // Create the native frame. CordbNativeFrame* pNativeFrame = new CordbNativeFrame(m_pCordbThread, frameData.fp, pNativeCode, pJITFuncData->nativeOffset, &(frameData.rd), frameData.v.taAmbientESP, !!frameData.quicklyUnwound, pCurrentAppDomain, &miscFrame, &(frameData.ctx)); pResultFrame.Assign(static_cast<CordbFrame *>(pNativeFrame)); m_pCachedFrame.Assign(static_cast<CordbFrame *>(pNativeFrame)); // @dbgtodo dynamic language debugging // If we are dealing with a dynamic method (e.g. an IL stub, an LCG method, etc.), // then we don't have the metadata or the debug info (sequence points, etc.). // This means that we can't do anything meaningful with a CordbJITILFrame anyway, // so let's not create the CordbJITILFrame at all. Note that methods created with // RefEmit are okay, i.e. they have metadata. // The check for IsNativeImpl() != CordbFunction::kNativeOnly catches an odd profiler // case. A profiler can rewrite assemblies at load time so that a P/invoke becomes a // regular managed method. mscordbi isn't yet designed to handle runtime metadata // changes, so it still thinks the method is a p/invoke. If we only relied on // frameData.v.fNoMetadata which is populated by the DAC, that will report // FALSE (the method does have metadata/IL now). However pNativeCode->LoadNativeInfo // is going to check DBI's metadata and calculate this is a p/invoke, which will // throw an exception that the method isn't IL. // Ideally we probably want to expose the profiler's change to the method, // however that will take significant work. Part of that is correctly detecting and // updating metadata in DBI, part is determining if/how the debugger is notified, // and part is auditing mscordbi to ensure that anything we cached based on the // old metadata is correctly invalidated. // Since this is a late fix going into a controlled servicing release I have // opted for a much narrower fix. Doing the check for IsNativeImpl() != CordbFunction::kNativeOnly // will continue to treat our new method as though it was a p/invoke, and the // debugger will not provide IL for it. The debugger can't inspect within the profiler // modified method, but at least the error won't leak out to interfere with inspection // of the callstack as a whole. if (!frameData.v.fNoMetadata && pNativeCode->GetFunction()->IsNativeImpl() != CordbFunction::kNativeOnly) { pNativeCode->LoadNativeInfo(); // By design, when a managed exception occurs we return the sequence point containing the faulting // instruction in the leaf frame. In the past we didn't always achieve this, // but we are being more deliberate about this behavior now. // If jsutAfterILThrow is true, it means nativeOffset points to the return address of IL_Throw // (or another JIT exception helper) after an exception has been thrown. // In such cases we want to adjust nativeOffset, so it will point to an actual exception callsite. // By subtracting STACKWALK_CONTROLPC_ADJUST_OFFSET from nativeOffset you can get // an address somewhere inside CALL instruction. 
// This ensures more consistent placement of exception line highlighting in Visual Studio DWORD nativeOffsetToMap = pJITFuncData->jsutAfterILThrow ? (DWORD)pJITFuncData->nativeOffset - STACKWALK_CONTROLPC_ADJUST_OFFSET : (DWORD)pJITFuncData->nativeOffset; CorDebugMappingResult mappingType; ULONG uILOffset = pNativeCode->GetSequencePoints()->MapNativeOffsetToIL( nativeOffsetToMap, &mappingType); // Find or create the IL Code, and the pJITILFrame. RSExtSmartPtr<CordbILCode> pCode; // The code for populating CordbFunction ILCode looks really bizarre... it appears to only grab the // correct version of the IL if that is still the current EnC version yet it is populated deliberately // late bound at which point the latest version may be different. In fact even here the latest version // could already be different, but this is no worse than what the code used to do. hr = pFunction->GetILCode(&pCode); IfFailThrow(hr); _ASSERTE(pCode != NULL); // We populate the code for ReJit eagerly to make sure we still have it if the profiler removes the // instrumentation later. Of course the only way it will still be accessible to our caller is if they // save a pointer to the ILCode. // I'm not sure if ignoring rejit for mini-dumps is the right call long term, but we aren't doing // anything special to collect the memory at dump time so we better be prepared to not fetch it here. // We'll attempt to treat it as not being instrumented, though I suspect the abstraction is leaky. RSSmartPtr<CordbReJitILCode> pReJitCode; EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY { VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode = VMPTR_NativeCodeVersionNode::NullPtr(); IfFailThrow(GetProcess()->GetDAC()->GetNativeCodeVersionNode(pJITFuncData->vmNativeCodeMethodDescToken, pJITFuncData->nativeStartAddressPtr, &vmNativeCodeVersionNode)); if (!vmNativeCodeVersionNode.IsNull()) { VMPTR_ILCodeVersionNode vmILCodeVersionNode = VMPTR_ILCodeVersionNode::NullPtr(); IfFailThrow(GetProcess()->GetDAC()->GetILCodeVersionNode(vmNativeCodeVersionNode, &vmILCodeVersionNode)); if (!vmILCodeVersionNode.IsNull()) { IfFailThrow(pFunction->LookupOrCreateReJitILCode(vmILCodeVersionNode, &pReJitCode)); } } } EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY RSInitHolder<CordbJITILFrame> pJITILFrame(new CordbJITILFrame(pNativeFrame, pCode, uILOffset, mappingType, frameData.v.exactGenericArgsToken, frameData.v.dwExactGenericArgsTokenIndex, !!frameData.v.fVarArgs, pReJitCode)); // Initialize the frame. This is a nop if the method is not a vararg method. hr = pJITILFrame->Init(); IfFailThrow(hr); pNativeFrame->m_JITILFrame.Assign(pJITILFrame); pJITILFrame.ClearAndMarkDontNeuter(); } STRESS_LOG3(LF_CORDB, LL_INFO1000, "CSW::GFW - managed stack frame (%p): CNF - 0x%p, CJILF - 0x%p", this, pNativeFrame, pNativeFrame->m_JITILFrame.GetValue()); } // kManagedStackFrame else if (ft == IDacDbiInterface::kNativeRuntimeUnwindableStackFrame) { _ASSERTE(frameData.eType == DebuggerIPCE_STRData::cRuntimeNativeFrame); // In order to find the FramePointer on x86, we need to unwind to the next frame. // Technically, only x86 needs to do this, because the x86 runtime stackwalker doesn't unwind // one frame ahead of time. However, we are doing this on all platforms to keep things simple. 
BOOL fSuccess = UnwindStackFrame(); (void)fSuccess; //prevent "unused variable" error from GCC _ASSERTE(fSuccess); m_fIsOneFrameAhead = true; #if defined(TARGET_X86) frameData.fp = pDAC->GetFramePointer(m_pSFIHandle); #endif // TARGET_X86 // Lookup the appdomain that the thread was in when it was executing code for this frame. We pass this // to the frame when we create it so we can properly resolve locals in that frame later. CordbAppDomain * pCurrentAppDomain = GetProcess()->LookupOrCreateAppDomain(frameData.vmCurrentAppDomainToken); _ASSERTE(pCurrentAppDomain != NULL); CordbRuntimeUnwindableFrame * pRuntimeFrame = new CordbRuntimeUnwindableFrame(m_pCordbThread, frameData.fp, pCurrentAppDomain, &(frameData.ctx)); pResultFrame.Assign(static_cast<CordbFrame *>(pRuntimeFrame)); m_pCachedFrame.Assign(static_cast<CordbFrame *>(pRuntimeFrame)); STRESS_LOG2(LF_CORDB, LL_INFO1000, "CSW::GFW - runtime unwindable stack frame (%p): 0x%p", this, pRuntimeFrame); } pResultFrame.TransferOwnershipExternal(ppFrame); return S_OK; }
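A pattern worth calling out before the after-image of the file repeats below: GetFrameWorker unwinds one frame early for managed and runtime-unwindable frames (historically so the x86 frame pointer can be computed) and records that in m_fIsOneFrameAhead, so GetContext, GetFrame, and Next must all compensate for a cursor that is already past the frame being reported. A rough, self-contained sketch of that one-frame-ahead cursor pattern, using hypothetical names rather than the real CordbStackWalk state:
#include <stdbool.h>
#include <stdio.h>
/* Hypothetical stand-ins for the real stackwalk state. */
typedef struct {
    int  pc;              /* pretend "context": just a frame index   */
    int  cached_pc;       /* state captured before the early unwind  */
    bool one_frame_ahead; /* mirrors m_fIsOneFrameAhead               */
} walker_t;
#define FRAME_COUNT 4
static bool raw_unwind(walker_t *w) { return ++w->pc < FRAME_COUNT; }
/* Materializing a frame needs the *next* frame's state, so unwind early
 * and remember that the cursor is now one frame ahead of the caller. */
static void get_frame(walker_t *w)
{
    w->cached_pc = w->pc;
    raw_unwind(w);
    w->one_frame_ahead = true;
    printf("frame %d (cursor already at %d)\n", w->cached_pc, w->pc);
}
/* Next() degenerates to clearing the flag when the unwind already happened. */
static bool next_frame(walker_t *w)
{
    if (w->one_frame_ahead) {
        w->one_frame_ahead = false;
        return w->pc < FRAME_COUNT;
    }
    return raw_unwind(w);
}
int main(void)
{
    walker_t w = { 0, 0, false };
    do { get_frame(&w); } while (next_frame(&w));
    return 0;
}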
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // RsStackWalk.cpp // // // This file contains the implementation of the V3 managed stackwalking API. // // ====================================================================================== #include "stdafx.h" #include "primitives.h" //--------------------------------------------------------------------------------------- // // Constructor for CordbStackWalk. // // Arguments: // pCordbThread - the thread on which this stackwalker is created // CordbStackWalk::CordbStackWalk(CordbThread * pCordbThread) : CordbBase(pCordbThread->GetProcess(), 0, enumCordbStackWalk), m_pCordbThread(pCordbThread), m_pSFIHandle(NULL), m_cachedSetContextFlag(SET_CONTEXT_FLAG_ACTIVE_FRAME), m_cachedHR(S_OK), m_fIsOneFrameAhead(false) { m_pCachedFrame.Clear(); } void CordbStackWalk::Init() { CordbProcess * pProcess = GetProcess(); m_lastSyncFlushCounter = pProcess->m_flushCounter; IDacDbiInterface * pDAC = pProcess->GetDAC(); pDAC->CreateStackWalk(m_pCordbThread->m_vmThreadToken, &m_context, &m_pSFIHandle); // see the function header of code:CordbStackWalk::CheckForLegacyHijackCase CheckForLegacyHijackCase(); // Add itself to the neuter list. m_pCordbThread->GetRefreshStackNeuterList()->Add(GetProcess(), this); } // ---------------------------------------------------------------------------- // CordbStackWalk::CheckForLegacyHijackCase // // Description: // @dbgtodo legacy interop debugging - In the case of an unhandled hardware exception, the // thread will be hijacked to code:Debugger::GenericHijackFunc, which the stackwalker doesn't know how to // unwind. We can teach the stackwalker to recognize that hijack stub, but since it's going to be deprecated // anyway, it's not worth the effort. So we check for the hijack CONTEXT here and use it as the CONTEXT. This // check should be removed when we are completely // out-of-process. // void CordbStackWalk::CheckForLegacyHijackCase() { #if defined(FEATURE_INTEROP_DEBUGGING) CordbProcess * pProcess = GetProcess(); // Only do this if we have a shim and we are interop-debugging. if ((pProcess->GetShim() != NULL) && pProcess->IsInteropDebugging()) { // And only if we have a CordbUnmanagedThread and we are hijacked to code:Debugger::GenericHijackFunc CordbUnmanagedThread * pUT = pProcess->GetUnmanagedThread(m_pCordbThread->GetVolatileOSThreadID()); if (pUT != NULL) { if (pUT->IsFirstChanceHijacked() || pUT->IsGenericHijacked()) { // The GetThreadContext function hides the effects of hijacking and returns the unhijacked context m_context.ContextFlags = DT_CONTEXT_FULL; pUT->GetThreadContext(&m_context); IDacDbiInterface * pDAC = GetProcess()->GetDAC(); pDAC->SetStackWalkCurrentContext(m_pCordbThread->m_vmThreadToken, m_pSFIHandle, SET_CONTEXT_FLAG_ACTIVE_FRAME, &m_context); } } } #endif // FEATURE_INTEROP_DEBUGGING } //--------------------------------------------------------------------------------------- // // Destructor for CordbStackWalk. // // Notes: // We don't really need to do anything here since the CordbStackWalk should have been neutered already. // CordbStackWalk::~CordbStackWalk() { _ASSERTE(IsNeutered()); } //--------------------------------------------------------------------------------------- // // This function resets all the state on a CordbStackWalk and releases all the memory. // It is used for neutering and refreshing. 
// void CordbStackWalk::DeleteAll() { _ASSERTE(GetProcess()->GetProcessLock()->HasLock()); // delete allocated memory if (m_pSFIHandle) { HRESULT hr = S_OK; EX_TRY { #if defined(FEATURE_DBGIPC_TRANSPORT_DI) // For Mac debugging, it's not safe to call into the DAC once // code:INativeEventPipeline::TerminateProcess is called. This is because the transport will not // work anymore. The sole purpose of calling DeleteStackWalk() is to release the resources and // memory allocated for the stackwalk. In the remote debugging case, the memory is allocated in // the debuggee process. If the process is already terminated, then it's ok to skip the call. if (!GetProcess()->m_exiting) #endif // FEATURE_DBGIPC_TRANSPORT_DI { // This Delete call shouldn't actually throw. Worst case, the DDImpl leaked memory. GetProcess()->GetDAC()->DeleteStackWalk(m_pSFIHandle); } } EX_CATCH_HRESULT(hr); SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr); m_pSFIHandle = NULL; } // clear out the cached frame m_pCachedFrame.Clear(); m_cachedHR = S_OK; m_fIsOneFrameAhead = false; } //--------------------------------------------------------------------------------------- // // Release all memory used by the stackwalker. // // // Notes: // CordbStackWalk is neutered by CordbThread or CleanupStack(). // void CordbStackWalk::Neuter() { if (IsNeutered()) { return; } DeleteAll(); CordbBase::Neuter(); } // standard QI function HRESULT CordbStackWalk::QueryInterface(REFIID id, void **pInterface) { if (id == IID_ICorDebugStackWalk) { *pInterface = static_cast<ICorDebugStackWalk*>(this); } else if (id == IID_IUnknown) { *pInterface = static_cast<IUnknown*>(static_cast<ICorDebugStackWalk*>(this)); } else { *pInterface = NULL; return E_NOINTERFACE; } ExternalAddRef(); return S_OK; } //--------------------------------------------------------------------------------------- // // Refreshes all the state stored on the CordbStackWalk. This is necessary because sending IPC events to // the LS flushes the DAC cache, and m_pSFIHandle is allocated entirely in DAC memory. So, we keep track // of whether we have sent an IPC event and refresh the CordbStackWalk if necessary. // // Notes: // Throws on error. // void CordbStackWalk::RefreshIfNeeded() { CordbProcess * pProcess = GetProcess(); _ASSERTE(pProcess->GetProcessLock()->HasLock()); // check if we need to refresh if (m_lastSyncFlushCounter != pProcess->m_flushCounter) { // Make a local copy of the CONTEXT here. DeleteAll() will delete the CONTEXT on the cached frame, // and CreateStackWalk() actually uses the CONTEXT buffer we pass to it. DT_CONTEXT ctx; if (m_fIsOneFrameAhead) { ctx = *(m_pCachedFrame->GetContext()); } else { ctx = m_context; } // clear all the state DeleteAll(); // create a new stackwalk handle pProcess->GetDAC()->CreateStackWalk(m_pCordbThread->m_vmThreadToken, &m_context, &m_pSFIHandle); // advance the stackwalker to where we originally were SetContextWorker(m_cachedSetContextFlag, sizeof(DT_CONTEXT), reinterpret_cast<BYTE *>(&ctx)); // update the sync counter m_lastSyncFlushCounter = pProcess->m_flushCounter; } } // CordbStackWalk::RefreshIfNeeded() //--------------------------------------------------------------------------------------- // // Retrieves the CONTEXT of the current frame. 
// // Arguments: // contextFlags - context flags used to determine the required size for the buffer // contextBufSize - size of the CONTEXT buffer // pContextSize - out parameter; returns the size required for the CONTEXT buffer // pbContextBuf - the CONTEXT buffer // // Return Value: // Return S_OK on success. // Return CORDBG_E_PAST_END_OF_STACK if we are already at the end of the stack. // Return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER) if the buffer is too small. // Return E_FAIL on other failures. // HRESULT CordbStackWalk::GetContext(ULONG32 contextFlags, ULONG32 contextBufSize, ULONG32 * pContextSize, BYTE pbContextBuf[]) { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_BEGIN(this) { RefreshIfNeeded(); // set the required size for the CONTEXT buffer if (pContextSize != NULL) { *pContextSize = ContextSizeForFlags(contextFlags); } // If all the user wants to know is the CONTEXT size, then we are done. if ((contextBufSize != 0) && (pbContextBuf != NULL)) { if (contextBufSize < 4) { ThrowWin32(ERROR_INSUFFICIENT_BUFFER); } DT_CONTEXT * pContext = reinterpret_cast<DT_CONTEXT *>(pbContextBuf); // Some helper functions that examine the context expect the flags to be initialized. pContext->ContextFlags = contextFlags; // check the size of the incoming buffer if (!CheckContextSizeForBuffer(contextBufSize, pbContextBuf)) { ThrowWin32(ERROR_INSUFFICIENT_BUFFER); } // Check if we are one frame ahead. If so, return the CONTEXT on the cached frame. if (m_fIsOneFrameAhead) { if (m_pCachedFrame != NULL) { const DT_CONTEXT * pSrcContext = m_pCachedFrame->GetContext(); _ASSERTE(pSrcContext); CORDbgCopyThreadContext(pContext, pSrcContext); } else { // We encountered a problem when we were trying to initialize the CordbNativeFrame. // However, the problem occurred after we have unwound the current frame. // What do we do here? We don't have the CONTEXT anymore. _ASSERTE(FAILED(m_cachedHR)); ThrowHR(m_cachedHR); } } else { // No easy way out in this case. We have to call the DDI. IDacDbiInterface * pDAC = GetProcess()->GetDAC(); IDacDbiInterface::FrameType ft = pDAC->GetStackWalkCurrentFrameInfo(m_pSFIHandle, NULL); if (ft == IDacDbiInterface::kInvalid) { ThrowHR(E_FAIL); } else if (ft == IDacDbiInterface::kAtEndOfStack) { ThrowHR(CORDBG_E_PAST_END_OF_STACK); } else if (ft == IDacDbiInterface::kExplicitFrame) { ThrowHR(CORDBG_E_NO_CONTEXT_FOR_INTERNAL_FRAME); } else { // We always store the current CONTEXT, so just copy it into the buffer. CORDbgCopyThreadContext(pContext, &m_context); } } } } PUBLIC_REENTRANT_API_END(hr); return hr; } //--------------------------------------------------------------------------------------- // // Set the stackwalker to the specified CONTEXT. // // Arguments: // flag - context flags used to determine the size of the CONTEXT // contextSize - the size of the CONTEXT // context - the CONTEXT as a byte array // // Return Value: // Return S_OK on success. // Return E_INVALIDARG if context is NULL // Return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER) if the CONTEXT is too small. // Return E_FAIL on other failures.
// HRESULT CordbStackWalk::SetContext(CorDebugSetContextFlag flag, ULONG32 contextSize, BYTE context[]) { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_BEGIN(this) { RefreshIfNeeded(); SetContextWorker(flag, contextSize, context); } PUBLIC_REENTRANT_API_END(hr); return hr; } //--------------------------------------------------------------------------------------- // // Refer to the comment for code:CordbStackWalk::SetContext // void CordbStackWalk::SetContextWorker(CorDebugSetContextFlag flag, ULONG32 contextSize, BYTE context[]) { if (context == NULL) { ThrowHR(E_INVALIDARG); } if (!CheckContextSizeForBuffer(contextSize, context)) { ThrowWin32(ERROR_INSUFFICIENT_BUFFER); } // invalidate the cache m_pCachedFrame.Clear(); m_cachedHR = S_OK; m_fIsOneFrameAhead = false; DT_CONTEXT * pSrcContext = reinterpret_cast<DT_CONTEXT *>(context); // Check the incoming CONTEXT using a temporary CONTEXT buffer before updating our real CONTEXT buffer. // The incoming CONTEXT is not required to have all the bits set in its CONTEXT flags, so only update // the registers specified by the CONTEXT flags. Note that CORDbgCopyThreadContext() honours the CONTEXT // flags on both the source and the destination CONTEXTs when it copies them. DT_CONTEXT tmpCtx = m_context; tmpCtx.ContextFlags |= pSrcContext->ContextFlags; CORDbgCopyThreadContext(&tmpCtx, pSrcContext); IDacDbiInterface * pDAC = GetProcess()->GetDAC(); IfFailThrow(pDAC->CheckContext(m_pCordbThread->m_vmThreadToken, &tmpCtx)); // At this point we have done all of our checks to verify that the incoming CONTEXT is sane, so we can // update our internal CONTEXT buffer. m_context = tmpCtx; m_cachedSetContextFlag = flag; pDAC->SetStackWalkCurrentContext(m_pCordbThread->m_vmThreadToken, m_pSFIHandle, flag, &m_context); } //--------------------------------------------------------------------------------------- // // Helper to perform all the necessary operations when we unwind, including: // 1) Unwind // 2) Save the new unwound CONTEXT // // Return Value: // Return TRUE if we successfully unwind to the next frame. // Return FALSE if there is no more frame to walk. // Throw on error. // BOOL CordbStackWalk::UnwindStackFrame() { CordbProcess * pProcess = GetProcess(); _ASSERTE(pProcess->GetProcessLock()->HasLock()); IDacDbiInterface * pDAC = pProcess->GetDAC(); BOOL retVal = pDAC->UnwindStackWalkFrame(m_pSFIHandle); // Now that we have unwound, make sure we update the CONTEXT buffer to reflect the current stack frame. // This call is safe regardless of whether the unwind is successful or not. pDAC->GetStackWalkCurrentContext(m_pSFIHandle, &m_context); return retVal; } // CordbStackWalk::UnwindStackFrame //--------------------------------------------------------------------------------------- // // Unwind the stackwalker to the next frame. // // Return Value: // Return S_OK on success. // Return CORDBG_E_FAIL_TO_UNWIND_FRAME if the unwind fails. // Return CORDBG_S_AT_END_OF_STACK if we have reached the end of the stack as a result of this unwind. // Return CORDBG_E_PAST_END_OF_STACK if we are already at the end of the stack to begin with. // HRESULT CordbStackWalk::Next() { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_BEGIN(this) { RefreshIfNeeded(); if (m_fIsOneFrameAhead) { // We have already unwound to the next frame when we materialized the CordbNativeFrame // for the current frame. So we just need to clear the cache because we are already at // the next frame.
if (m_pCachedFrame != NULL) { m_pCachedFrame.Clear(); } m_cachedHR = S_OK; m_fIsOneFrameAhead = false; } else { IDacDbiInterface * pDAC = GetProcess()->GetDAC(); IDacDbiInterface::FrameType ft = IDacDbiInterface::kInvalid; ft = pDAC->GetStackWalkCurrentFrameInfo(this->m_pSFIHandle, NULL); if (ft == IDacDbiInterface::kAtEndOfStack) { ThrowHR(CORDBG_E_PAST_END_OF_STACK); } // update the cached flag to indicate that we have reached an unwind CONTEXT m_cachedSetContextFlag = SET_CONTEXT_FLAG_UNWIND_FRAME; if (UnwindStackFrame()) { hr = S_OK; } else { hr = CORDBG_S_AT_END_OF_STACK; } } } PUBLIC_REENTRANT_API_END(hr); return hr; } //--------------------------------------------------------------------------------------- // // Retrieves an ICDFrame corresponding to the current frame: // Stopped At Out Parameter Return Value // ---------- ------------- ------------ // explicit frame CordbInternalFrame S_OK // managed stack frame CordbNativeFrame S_OK // native stack frame NULL S_FALSE // // Arguments: // ppFrame - out parameter; return the ICDFrame // // Return Value: // On success return the HRs above. // Return CORDBG_E_PAST_END_OF_STACK if we are already at the end of the stack. // Return E_INVALIDARG if ppFrame is NULL // Return E_FAIL on other errors. // // Notes: // This is just a wrapper with an EX_TRY/EX_CATCH_HRESULT for GetFrameWorker(). // HRESULT CordbStackWalk::GetFrame(ICorDebugFrame ** ppFrame) { HRESULT hr = S_OK; PUBLIC_REENTRANT_API_NO_LOCK_BEGIN(this) { ATT_REQUIRE_STOPPED_MAY_FAIL_OR_THROW(GetProcess(), ThrowHR); RSLockHolder lockHolder(GetProcess()->GetProcessLock()); RefreshIfNeeded(); hr = GetFrameWorker(ppFrame); } PUBLIC_REENTRANT_API_END(hr); if (FAILED(hr)) { if (m_fIsOneFrameAhead && (m_pCachedFrame == NULL)) { // We encountered a problem when we tried to materialize a CordbNativeFrame. // Cache the failure HR so that we can return it later if the caller // calls GetFrame() again or GetContext(). m_cachedHR = hr; } } return hr; } //--------------------------------------------------------------------------------------- // // Refer to the comment for code:CordbStackWalk::GetFrame // HRESULT CordbStackWalk::GetFrameWorker(ICorDebugFrame ** ppFrame) { _ASSERTE(GetProcess()->GetProcessLock()->HasLock()); if (ppFrame == NULL) { ThrowHR(E_INVALIDARG); } *ppFrame = NULL; RSInitHolder<CordbFrame> pResultFrame(NULL); if (m_fIsOneFrameAhead) { if (m_pCachedFrame != NULL) { pResultFrame.Assign(m_pCachedFrame); pResultFrame.TransferOwnershipExternal(ppFrame); return S_OK; } else { // We encountered a problem when we were trying to initialize the CordbNativeFrame. // However, the problem occurred after we have unwound the current frame. // Whatever error code we return, it should be the same one GetContext() returns.
_ASSERTE(FAILED(m_cachedHR)); ThrowHR(m_cachedHR); } } IDacDbiInterface * pDAC = NULL; DebuggerIPCE_STRData frameData; ZeroMemory(&frameData, sizeof(frameData)); IDacDbiInterface::FrameType ft = IDacDbiInterface::kInvalid; pDAC = GetProcess()->GetDAC(); ft = pDAC->GetStackWalkCurrentFrameInfo(m_pSFIHandle, &frameData); if (ft == IDacDbiInterface::kInvalid) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - invalid stackwalker (%p)", this); ThrowHR(E_FAIL); } else if (ft == IDacDbiInterface::kAtEndOfStack) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - past end of stack (%p)", this); ThrowHR(CORDBG_E_PAST_END_OF_STACK); } else if (ft == IDacDbiInterface::kNativeStackFrame) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - native stack frame (%p)", this); return S_FALSE; } else if (ft == IDacDbiInterface::kExplicitFrame) { STRESS_LOG1(LF_CORDB, LL_INFO1000, "CSW::GFW - explicit frame (%p)", this); // We no longer expect to get internal frames by unwinding. GetProcess()->TargetConsistencyCheck(false); } else if (ft == IDacDbiInterface::kManagedStackFrame) { _ASSERTE(frameData.eType == DebuggerIPCE_STRData::cMethodFrame); HRESULT hr = S_OK; // In order to find the FramePointer on x86, we need to unwind to the next frame. // Technically, only x86 needs to do this, because the x86 runtime stackwalker doesn't unwind // one frame ahead of time. However, we are doing this on all platforms to keep things simple. BOOL fSuccess = UnwindStackFrame(); (void)fSuccess; //prevent "unused variable" error from GCC _ASSERTE(fSuccess); m_fIsOneFrameAhead = true; #if defined(TARGET_X86) frameData.fp = pDAC->GetFramePointer(m_pSFIHandle); #endif // TARGET_X86 // currentFuncData contains general information about the method. // It has no information about any particular jitted instance of the method. DebuggerIPCE_FuncData * pFuncData = &(frameData.v.funcData); // currentJITFuncData contains information about the current jitted instance of the method // on the stack. DebuggerIPCE_JITFuncData * pJITFuncData = &(frameData.v.jitFuncData); // Lookup the appdomain that the thread was in when it was executing code for this frame. We pass this // to the frame when we create it so we can properly resolve locals in that frame later. CordbAppDomain * pCurrentAppDomain = GetProcess()->LookupOrCreateAppDomain(frameData.vmCurrentAppDomainToken); _ASSERTE(pCurrentAppDomain != NULL); // Lookup the module CordbModule* pModule = pCurrentAppDomain->LookupOrCreateModule(pFuncData->vmDomainAssembly); PREFIX_ASSUME(pModule != NULL); // Create or look up a CordbNativeCode. There is one for each jitted instance of a method, // and we may have multiple instances because of generics. CordbNativeCode * pNativeCode = pModule->LookupOrCreateNativeCode(pFuncData->funcMetadataToken, pJITFuncData->vmNativeCodeMethodDescToken, pJITFuncData->nativeStartAddressPtr); IfFailThrow(hr); // The native code object will create the function object if needed CordbFunction * pFunction = pNativeCode->GetFunction(); // A CordbFunction is theoretically the uninstantiated method, yet for back-compat we allow // debuggers to assume that it corresponds to exactly 1 native code blob. In order for // an open generic function to know what native code to give back, we attach an arbitrary // native code that we located through code inspection. // Note that not all CordbFunction objects get created via stack traces because you can also // create them by name.
In that case you still won't get code for open generic functions // because we will never have attached one and the lookup by token is insufficient. This // behavior mimics our 2.0 debugging behavior though so it's not a regression. pFunction->NotifyCodeCreated(pNativeCode); IfFailThrow(hr); _ASSERTE((pFunction != NULL) && (pNativeCode != NULL)); // initialize the auxiliary info required for funclets CordbMiscFrame miscFrame(pJITFuncData); // Create the native frame. CordbNativeFrame* pNativeFrame = new CordbNativeFrame(m_pCordbThread, frameData.fp, pNativeCode, pJITFuncData->nativeOffset, &(frameData.rd), frameData.v.taAmbientESP, !!frameData.quicklyUnwound, pCurrentAppDomain, &miscFrame, &(frameData.ctx)); pResultFrame.Assign(static_cast<CordbFrame *>(pNativeFrame)); m_pCachedFrame.Assign(static_cast<CordbFrame *>(pNativeFrame)); // @dbgtodo dynamic language debugging // If we are dealing with a dynamic method (e.g. an IL stub, an LCG method, etc.), // then we don't have the metadata or the debug info (sequence points, etc.). // This means that we can't do anything meaningful with a CordbJITILFrame anyway, // so let's not create the CordbJITILFrame at all. Note that methods created with // RefEmit are okay, i.e. they have metadata. // The check for IsNativeImpl() != CordbFunction::kNativeOnly catches an odd profiler // case. A profiler can rewrite assemblies at load time so that a P/invoke becomes a // regular managed method. mscordbi isn't yet designed to handle runtime metadata // changes, so it still thinks the method is a p/invoke. If we only relied on // frameData.v.fNoMetadata which is populated by the DAC, that will report // FALSE (the method does have metadata/IL now). However pNativeCode->LoadNativeInfo // is going to check DBI's metadata and calculate this is a p/invoke, which will // throw an exception that the method isn't IL. // Ideally we probably want to expose the profiler's change to the method, // however that will take significant work. Part of that is correctly detecting and // updating metadata in DBI, part is determining if/how the debugger is notified, // and part is auditing mscordbi to ensure that anything we cached based on the // old metadata is correctly invalidated. // Since this is a late fix going into a controlled servicing release I have // opted for a much narrower fix. Doing the check for IsNativeImpl() != CordbFunction::kNativeOnly // will continue to treat our new method as though it was a p/invoke, and the // debugger will not provide IL for it. The debugger can't inspect within the profiler // modified method, but at least the error won't leak out to interfere with inspection // of the callstack as a whole. if (!frameData.v.fNoMetadata && pNativeCode->GetFunction()->IsNativeImpl() != CordbFunction::kNativeOnly) { pNativeCode->LoadNativeInfo(); // By design, when a managed exception occurs we return the sequence point containing the faulting // instruction in the leaf frame. In the past we didn't always achieve this, // but we are being more deliberate about this behavior now. // If jsutAfterILThrow is true, it means nativeOffset points to the return address of IL_Throw // (or another JIT exception helper) after an exception has been thrown. // In such cases we want to adjust nativeOffset, so it will point at an actual exception callsite. // By subtracting STACKWALK_CONTROLPC_ADJUST_OFFSET from nativeOffset you can get // an address somewhere inside the CALL instruction.
// This ensures more consistent placement of exception line highlighting in Visual Studio DWORD nativeOffsetToMap = pJITFuncData->jsutAfterILThrow ? (DWORD)pJITFuncData->nativeOffset - STACKWALK_CONTROLPC_ADJUST_OFFSET : (DWORD)pJITFuncData->nativeOffset; CorDebugMappingResult mappingType; ULONG uILOffset = pNativeCode->GetSequencePoints()->MapNativeOffsetToIL( nativeOffsetToMap, &mappingType); // Find or create the IL Code, and the pJITILFrame. RSExtSmartPtr<CordbILCode> pCode; // The code for populating CordbFunction ILCode looks really bizarre... it appears to only grab the // correct version of the IL if that is still the current EnC version yet it is populated deliberately // late bound at which point the latest version may be different. In fact even here the latest version // could already be different, but this is no worse than what the code used to do. hr = pFunction->GetILCode(&pCode); IfFailThrow(hr); _ASSERTE(pCode != NULL); // We populate the code for ReJit eagerly to make sure we still have it if the profiler removes the // instrumentation later. Of course the only way it will still be accessible to our caller is if they // save a pointer to the ILCode. // I'm not sure if ignoring rejit for mini-dumps is the right call long term, but we aren't doing // anything special to collect the memory at dump time so we better be prepared to not fetch it here. // We'll attempt to treat it as not being instrumented, though I suspect the abstraction is leaky. RSSmartPtr<CordbReJitILCode> pReJitCode; EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY { VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode = VMPTR_NativeCodeVersionNode::NullPtr(); IfFailThrow(GetProcess()->GetDAC()->GetNativeCodeVersionNode(pJITFuncData->vmNativeCodeMethodDescToken, pJITFuncData->nativeStartAddressPtr, &vmNativeCodeVersionNode)); if (!vmNativeCodeVersionNode.IsNull()) { VMPTR_ILCodeVersionNode vmILCodeVersionNode = VMPTR_ILCodeVersionNode::NullPtr(); IfFailThrow(GetProcess()->GetDAC()->GetILCodeVersionNode(vmNativeCodeVersionNode, &vmILCodeVersionNode)); if (!vmILCodeVersionNode.IsNull()) { IfFailThrow(pFunction->LookupOrCreateReJitILCode(vmILCodeVersionNode, &pReJitCode)); } } } EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY RSInitHolder<CordbJITILFrame> pJITILFrame(new CordbJITILFrame(pNativeFrame, pCode, uILOffset, mappingType, frameData.v.exactGenericArgsToken, frameData.v.dwExactGenericArgsTokenIndex, !!frameData.v.fVarArgs, pReJitCode)); // Initialize the frame. This is a nop if the method is not a vararg method. hr = pJITILFrame->Init(); IfFailThrow(hr); pNativeFrame->m_JITILFrame.Assign(pJITILFrame); pJITILFrame.ClearAndMarkDontNeuter(); } STRESS_LOG3(LF_CORDB, LL_INFO1000, "CSW::GFW - managed stack frame (%p): CNF - 0x%p, CJILF - 0x%p", this, pNativeFrame, pNativeFrame->m_JITILFrame.GetValue()); } // kManagedStackFrame else if (ft == IDacDbiInterface::kNativeRuntimeUnwindableStackFrame) { _ASSERTE(frameData.eType == DebuggerIPCE_STRData::cRuntimeNativeFrame); // In order to find the FramePointer on x86, we need to unwind to the next frame. // Technically, only x86 needs to do this, because the x86 runtime stackwalker doesn't unwind // one frame ahead of time. However, we are doing this on all platforms to keep things simple.
BOOL fSuccess = UnwindStackFrame(); (void)fSuccess; //prevent "unused variable" error from GCC _ASSERTE(fSuccess); m_fIsOneFrameAhead = true; #if defined(TARGET_X86) frameData.fp = pDAC->GetFramePointer(m_pSFIHandle); #endif // TARGET_X86 // Lookup the appdomain that the thread was in when it was executing code for this frame. We pass this // to the frame when we create it so we can properly resolve locals in that frame later. CordbAppDomain * pCurrentAppDomain = GetProcess()->LookupOrCreateAppDomain(frameData.vmCurrentAppDomainToken); _ASSERTE(pCurrentAppDomain != NULL); CordbRuntimeUnwindableFrame * pRuntimeFrame = new CordbRuntimeUnwindableFrame(m_pCordbThread, frameData.fp, pCurrentAppDomain, &(frameData.ctx)); pResultFrame.Assign(static_cast<CordbFrame *>(pRuntimeFrame)); m_pCachedFrame.Assign(static_cast<CordbFrame *>(pRuntimeFrame)); STRESS_LOG2(LF_CORDB, LL_INFO1000, "CSW::GFW - runtime unwindable stack frame (%p): 0x%p", this, pRuntimeFrame); } pResultFrame.TransferOwnershipExternal(ppFrame); return S_OK; }
-1
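The RsStackWalk.cpp row above is organized around a "one frame ahead" invariant: materializing the current frame (GetFrameWorker) pre-unwinds the low-level walker to the caller, so Next() usually only has to drop the cached frame. Below is a minimal, self-contained C++ sketch of that control flow; ToyWalker, ToyFrame, and the fake instruction pointers are hypothetical stand-ins for illustration, not mscordbi types.

// Sketch of the "one frame ahead" caching pattern, under invented names.
#include <cstdio>
#include <memory>
#include <vector>

struct ToyFrame { int ip; };

class ToyWalker {
    std::vector<int> m_ips{40, 30, 20, 10};   // fake return addresses, leaf first
    size_t m_pos = 0;                         // current unwind position
    std::shared_ptr<ToyFrame> m_cachedFrame;  // frame we already materialized
    bool m_isOneFrameAhead = false;           // did materializing pre-unwind?

    bool Unwind() { return ++m_pos < m_ips.size(); } // false at end of stack

public:
    // Materializing a frame requires unwinding to the caller first (e.g. to
    // compute a frame pointer), so afterwards the walker is one frame ahead.
    std::shared_ptr<ToyFrame> GetFrame() {
        if (m_isOneFrameAhead) return m_cachedFrame;   // already built it
        auto frame = std::make_shared<ToyFrame>(ToyFrame{m_ips[m_pos]});
        Unwind();                                      // pre-unwind to the caller
        m_isOneFrameAhead = true;
        m_cachedFrame = frame;
        return frame;
    }

    // Advance to the caller. If GetFrame already pre-unwound, just drop the cache.
    bool Next() {
        if (m_isOneFrameAhead) {
            m_cachedFrame.reset();
            m_isOneFrameAhead = false;
            return m_pos < m_ips.size();
        }
        return Unwind();
    }
};

int main() {
    ToyWalker w;
    do { std::printf("frame ip=%d\n", w.GetFrame()->ip); } while (w.Next());
}

The pattern trades an extra unwind at materialization time for a trivial Next(), mirroring how CordbStackWalk::Next merely clears m_pCachedFrame when m_fIsOneFrameAhead is set.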
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/delegateinfo.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: DelegateInfo.h ** ** ** Purpose: Native methods on System.ThreadPool ** and its inner classes ** ** ===========================================================*/ #ifndef DELEGATE_INFO #define DELEGATE_INFO struct DelegateInfo; typedef DelegateInfo* DelegateInfoPtr; struct DelegateInfo { OBJECTHANDLE m_stateHandle; OBJECTHANDLE m_eventHandle; OBJECTHANDLE m_registeredWaitHandle; #ifndef DACCESS_COMPILE void Release() { CONTRACTL { // m_compressedStack->Release() can actually throw today because it has got a call // to new down the stack. However that is recent and the semantic of that api is such that // it should not throw. I am expecting cleanup of that function to take care of that, // so I am adding this comment to make sure the issue is documented. // Remove this comment once that work is done NOTHROW; GC_TRIGGERS; MODE_COOPERATIVE; FORBID_FAULT; } CONTRACTL_END; if (m_stateHandle) DestroyHandle(m_stateHandle); if (m_eventHandle) DestroyHandle(m_eventHandle); if (m_registeredWaitHandle) DestroyHandle(m_registeredWaitHandle); } #endif static DelegateInfo *MakeDelegateInfo(OBJECTREF *state, OBJECTREF *waitEvent, OBJECTREF *registeredWaitObject); }; #endif // DELEGATE_INFO
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: DelegateInfo.h ** ** ** Purpose: Native methods on System.ThreadPool ** and its inner classes ** ** ===========================================================*/ #ifndef DELEGATE_INFO #define DELEGATE_INFO struct DelegateInfo; typedef DelegateInfo* DelegateInfoPtr; struct DelegateInfo { OBJECTHANDLE m_stateHandle; OBJECTHANDLE m_eventHandle; OBJECTHANDLE m_registeredWaitHandle; #ifndef DACCESS_COMPILE void Release() { CONTRACTL { // m_compressedStack->Release() can actually throw today because it has got a call // to new down the stack. However that is recent and the semantic of that api is such that // it should not throw. I am expecting cleanup of that function to take care of that, // so I am adding this comment to make sure the issue is documented. // Remove this comment once that work is done NOTHROW; GC_TRIGGERS; MODE_COOPERATIVE; FORBID_FAULT; } CONTRACTL_END; if (m_stateHandle) DestroyHandle(m_stateHandle); if (m_eventHandle) DestroyHandle(m_eventHandle); if (m_registeredWaitHandle) DestroyHandle(m_registeredWaitHandle); } #endif static DelegateInfo *MakeDelegateInfo(OBJECTREF *state, OBJECTREF *waitEvent, OBJECTREF *registeredWaitObject); }; #endif // DELEGATE_INFO
-1
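The DelegateInfo.h row above destroys each GC handle slot only when it was actually allocated, which keeps Release() safe under partial initialization. Here is a hedged, self-contained sketch of that null-checked release pattern; Handle and DestroyHandle below are plain stand-ins for the runtime's OBJECTHANDLE machinery, not the real APIs.

// Sketch of null-checked handle release, with invented Handle/DestroyHandle.
#include <cstdio>

using Handle = int*;   // stand-in for OBJECTHANDLE

static void DestroyHandle(Handle h) {
    std::printf("destroying %p\n", static_cast<void*>(h));
    delete h;
}

struct ToyDelegateInfo {
    Handle m_stateHandle = nullptr;
    Handle m_eventHandle = nullptr;
    Handle m_registeredWaitHandle = nullptr;

    // Mirrors the shape of DelegateInfo::Release: the null checks make it safe
    // to call even when only some of the handle slots were ever populated.
    void Release() {
        if (m_stateHandle) DestroyHandle(m_stateHandle);
        if (m_eventHandle) DestroyHandle(m_eventHandle);
        if (m_registeredWaitHandle) DestroyHandle(m_registeredWaitHandle);
        m_stateHandle = m_eventHandle = m_registeredWaitHandle = nullptr;
    }
};

int main() {
    ToyDelegateInfo info;
    info.m_stateHandle = new int(1);  // only one slot populated
    info.Release();                   // destroys just that slot
}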
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/debug/shared/i386/primitives.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: primitives.cpp // // // Platform-specific debugger primitives // //***************************************************************************** #include "primitives.h" // // CopyThreadContext() does an intelligent copy from pSrc to pDst, // respecting the ContextFlags of both contexts. // void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc) { DWORD dstFlags = pDst->ContextFlags; DWORD srcFlags = pSrc->ContextFlags; LOG((LF_CORDB, LL_INFO1000000, "CP::CTC: pDst=0x%08x dstFlags=0x%x, pSrc=0x%08x srcFlags=0x%x\n", pDst, dstFlags, pSrc, srcFlags)); if ((dstFlags & srcFlags & DT_CONTEXT_CONTROL) == DT_CONTEXT_CONTROL) CopyContextChunk(&(pDst->Ebp), &(pSrc->Ebp), pDst->ExtendedRegisters, DT_CONTEXT_CONTROL); if ((dstFlags & srcFlags & DT_CONTEXT_INTEGER) == DT_CONTEXT_INTEGER) CopyContextChunk(&(pDst->Edi), &(pSrc->Edi), &(pDst->Ebp), DT_CONTEXT_INTEGER); if ((dstFlags & srcFlags & DT_CONTEXT_SEGMENTS) == DT_CONTEXT_SEGMENTS) CopyContextChunk(&(pDst->SegGs), &(pSrc->SegGs), &(pDst->Edi), DT_CONTEXT_SEGMENTS); if ((dstFlags & srcFlags & DT_CONTEXT_FLOATING_POINT) == DT_CONTEXT_FLOATING_POINT) CopyContextChunk(&(pDst->FloatSave), &(pSrc->FloatSave), (&pDst->FloatSave)+1, DT_CONTEXT_FLOATING_POINT); if ((dstFlags & srcFlags & DT_CONTEXT_DEBUG_REGISTERS) == DT_CONTEXT_DEBUG_REGISTERS) CopyContextChunk(&(pDst->Dr0), &(pSrc->Dr0), &(pDst->FloatSave), DT_CONTEXT_DEBUG_REGISTERS); if ((dstFlags & srcFlags & DT_CONTEXT_EXTENDED_REGISTERS) == DT_CONTEXT_EXTENDED_REGISTERS) CopyContextChunk(pDst->ExtendedRegisters, pSrc->ExtendedRegisters, &(pDst->ExtendedRegisters[MAXIMUM_SUPPORTED_EXTENSION]), DT_CONTEXT_EXTENDED_REGISTERS); } // Update the regdisplay from a given context. void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD, DT_CONTEXT* pContext) { // We must pay attention to the context flags so that we only use valid portions // of the context. DWORD flags = pContext->ContextFlags; if ((flags & DT_CONTEXT_CONTROL) == DT_CONTEXT_CONTROL) { pDRD->PC = (SIZE_T)CORDbgGetIP(pContext); pDRD->SP = (SIZE_T)CORDbgGetSP(pContext); pDRD->FP = (SIZE_T)CORDbgGetFP(pContext); } if ((flags & DT_CONTEXT_INTEGER) == DT_CONTEXT_INTEGER) { pDRD->Eax = pContext->Eax; pDRD->Ebx = pContext->Ebx; pDRD->Ecx = pContext->Ecx; pDRD->Edx = pContext->Edx; pDRD->Esi = pContext->Esi; pDRD->Edi = pContext->Edi; } } #if defined(ALLOW_VMPTR_ACCESS) || !defined(RIGHT_SIDE_COMPILE) void SetDebuggerREGDISPLAYFromREGDISPLAY(DebuggerREGDISPLAY* pDRD, REGDISPLAY* pRD) { SUPPORTS_DAC_HOST_ONLY; // Frame pointer LPVOID FPAddress = GetRegdisplayFPAddress(pRD); pDRD->FP = (FPAddress == NULL ? 0 : *((SIZE_T *)FPAddress)); pDRD->Edi = (pRD->GetEdiLocation() == NULL ? 0 : *pRD->GetEdiLocation()); pDRD->Esi = (pRD->GetEsiLocation() == NULL ? 0 : *pRD->GetEsiLocation()); pDRD->Ebx = (pRD->GetEbxLocation() == NULL ? 0 : *pRD->GetEbxLocation()); pDRD->Edx = (pRD->GetEdxLocation() == NULL ? 0 : *pRD->GetEdxLocation()); pDRD->Ecx = (pRD->GetEcxLocation() == NULL ? 0 : *pRD->GetEcxLocation()); pDRD->Eax = (pRD->GetEaxLocation() == NULL ? 
0 : *pRD->GetEaxLocation()); #if defined(USE_REMOTE_REGISTER_ADDRESS) pDRD->pFP = PushedRegAddr(pRD, FPAddress); pDRD->pEdi = PushedRegAddr(pRD, pRD->pEdi); pDRD->pEsi = PushedRegAddr(pRD, pRD->pEsi); pDRD->pEbx = PushedRegAddr(pRD, pRD->pEbx); pDRD->pEdx = PushedRegAddr(pRD, pRD->pEdx); pDRD->pEcx = PushedRegAddr(pRD, pRD->pEcx); pDRD->pEax = PushedRegAddr(pRD, pRD->pEax); #else // !USE_REMOTE_REGISTER_ADDRESS pDRD->pFP = NULL; pDRD->pEdi = NULL; pDRD->pEsi = NULL; pDRD->pEbx = NULL; pDRD->pEdx = NULL; pDRD->pEcx = NULL; pDRD->pEax = NULL; #endif // !USE_REMOTE_REGISTER_ADDRESS pDRD->SP = pRD->SP; pDRD->PC = pRD->ControlPC; // Please leave EBP, ESP, EIP at the front so I don't have to scroll // left to see the most important registers. Thanks! LOG( (LF_CORDB, LL_INFO1000, "DT::TASSC:Registers:" "Ebp = %x Esp = %x Eip = %x Edi:%d " "Esi = %x Ebx = %x Edx = %x Ecx = %x Eax = %x\n", pDRD->FP, pDRD->SP, pDRD->PC, pDRD->Edi, pDRD->Esi, pDRD->Ebx, pDRD->Edx, pDRD->Ecx, pDRD->Eax ) ); } #endif // ALLOW_VMPTR_ACCESS || !RIGHT_SIDE_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: primitives.cpp // // // Platform-specific debugger primitives // //***************************************************************************** #include "primitives.h" // // CopyThreadContext() does an intelligent copy from pSrc to pDst, // respecting the ContextFlags of both contexts. // void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc) { DWORD dstFlags = pDst->ContextFlags; DWORD srcFlags = pSrc->ContextFlags; LOG((LF_CORDB, LL_INFO1000000, "CP::CTC: pDst=0x%08x dstFlags=0x%x, pSrc=0x%08x srcFlags=0x%x\n", pDst, dstFlags, pSrc, srcFlags)); if ((dstFlags & srcFlags & DT_CONTEXT_CONTROL) == DT_CONTEXT_CONTROL) CopyContextChunk(&(pDst->Ebp), &(pSrc->Ebp), pDst->ExtendedRegisters, DT_CONTEXT_CONTROL); if ((dstFlags & srcFlags & DT_CONTEXT_INTEGER) == DT_CONTEXT_INTEGER) CopyContextChunk(&(pDst->Edi), &(pSrc->Edi), &(pDst->Ebp), DT_CONTEXT_INTEGER); if ((dstFlags & srcFlags & DT_CONTEXT_SEGMENTS) == DT_CONTEXT_SEGMENTS) CopyContextChunk(&(pDst->SegGs), &(pSrc->SegGs), &(pDst->Edi), DT_CONTEXT_SEGMENTS); if ((dstFlags & srcFlags & DT_CONTEXT_FLOATING_POINT) == DT_CONTEXT_FLOATING_POINT) CopyContextChunk(&(pDst->FloatSave), &(pSrc->FloatSave), (&pDst->FloatSave)+1, DT_CONTEXT_FLOATING_POINT); if ((dstFlags & srcFlags & DT_CONTEXT_DEBUG_REGISTERS) == DT_CONTEXT_DEBUG_REGISTERS) CopyContextChunk(&(pDst->Dr0), &(pSrc->Dr0), &(pDst->FloatSave), DT_CONTEXT_DEBUG_REGISTERS); if ((dstFlags & srcFlags & DT_CONTEXT_EXTENDED_REGISTERS) == DT_CONTEXT_EXTENDED_REGISTERS) CopyContextChunk(pDst->ExtendedRegisters, pSrc->ExtendedRegisters, &(pDst->ExtendedRegisters[MAXIMUM_SUPPORTED_EXTENSION]), DT_CONTEXT_EXTENDED_REGISTERS); } // Update the regdisplay from a given context. void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD, DT_CONTEXT* pContext) { // We must pay attention to the context flags so that we only use valid portions // of the context. DWORD flags = pContext->ContextFlags; if ((flags & DT_CONTEXT_CONTROL) == DT_CONTEXT_CONTROL) { pDRD->PC = (SIZE_T)CORDbgGetIP(pContext); pDRD->SP = (SIZE_T)CORDbgGetSP(pContext); pDRD->FP = (SIZE_T)CORDbgGetFP(pContext); } if ((flags & DT_CONTEXT_INTEGER) == DT_CONTEXT_INTEGER) { pDRD->Eax = pContext->Eax; pDRD->Ebx = pContext->Ebx; pDRD->Ecx = pContext->Ecx; pDRD->Edx = pContext->Edx; pDRD->Esi = pContext->Esi; pDRD->Edi = pContext->Edi; } } #if defined(ALLOW_VMPTR_ACCESS) || !defined(RIGHT_SIDE_COMPILE) void SetDebuggerREGDISPLAYFromREGDISPLAY(DebuggerREGDISPLAY* pDRD, REGDISPLAY* pRD) { SUPPORTS_DAC_HOST_ONLY; // Frame pointer LPVOID FPAddress = GetRegdisplayFPAddress(pRD); pDRD->FP = (FPAddress == NULL ? 0 : *((SIZE_T *)FPAddress)); pDRD->Edi = (pRD->GetEdiLocation() == NULL ? 0 : *pRD->GetEdiLocation()); pDRD->Esi = (pRD->GetEsiLocation() == NULL ? 0 : *pRD->GetEsiLocation()); pDRD->Ebx = (pRD->GetEbxLocation() == NULL ? 0 : *pRD->GetEbxLocation()); pDRD->Edx = (pRD->GetEdxLocation() == NULL ? 0 : *pRD->GetEdxLocation()); pDRD->Ecx = (pRD->GetEcxLocation() == NULL ? 0 : *pRD->GetEcxLocation()); pDRD->Eax = (pRD->GetEaxLocation() == NULL ? 
0 : *pRD->GetEaxLocation()); #if defined(USE_REMOTE_REGISTER_ADDRESS) pDRD->pFP = PushedRegAddr(pRD, FPAddress); pDRD->pEdi = PushedRegAddr(pRD, pRD->pEdi); pDRD->pEsi = PushedRegAddr(pRD, pRD->pEsi); pDRD->pEbx = PushedRegAddr(pRD, pRD->pEbx); pDRD->pEdx = PushedRegAddr(pRD, pRD->pEdx); pDRD->pEcx = PushedRegAddr(pRD, pRD->pEcx); pDRD->pEax = PushedRegAddr(pRD, pRD->pEax); #else // !USE_REMOTE_REGISTER_ADDRESS pDRD->pFP = NULL; pDRD->pEdi = NULL; pDRD->pEsi = NULL; pDRD->pEbx = NULL; pDRD->pEdx = NULL; pDRD->pEcx = NULL; pDRD->pEax = NULL; #endif // !USE_REMOTE_REGISTER_ADDRESS pDRD->SP = pRD->SP; pDRD->PC = pRD->ControlPC; // Please leave EBP, ESP, EIP at the front so I don't have to scroll // left to see the most important registers. Thanks! LOG( (LF_CORDB, LL_INFO1000, "DT::TASSC:Registers:" "Ebp = %x Esp = %x Eip = %x Edi:%d " "Esi = %x Ebx = %x Edx = %x Ecx = %x Eax = %x\n", pDRD->FP, pDRD->SP, pDRD->PC, pDRD->Edi, pDRD->Esi, pDRD->Ebx, pDRD->Edx, pDRD->Ecx, pDRD->Eax ) ); } #endif // ALLOW_VMPTR_ACCESS || !RIGHT_SIDE_COMPILE
-1
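The i386 primitives.cpp row above copies a register chunk only when the corresponding flag bit is set on both the source and the destination CONTEXT. The following self-contained C++ sketch isolates that flag-intersection logic; the ToyContext layout and flag values are invented for illustration, and only the masking pattern matches CORDbgCopyThreadContext.

// Sketch of flag-gated context copying under an invented ToyContext layout.
#include <cstdint>
#include <cstdio>

constexpr uint32_t CTX_CONTROL = 0x1;  // ip/sp chunk
constexpr uint32_t CTX_INTEGER = 0x2;  // general-purpose register chunk

struct ToyContext {
    uint32_t flags;
    uint32_t ip, sp;   // "control" chunk
    uint32_t eax, ebx; // "integer" chunk
};

void CopyContext(ToyContext* dst, const ToyContext* src) {
    // Honor the flags on both sides: a chunk moves only if both declare it valid.
    uint32_t both = dst->flags & src->flags;
    if ((both & CTX_CONTROL) == CTX_CONTROL) { dst->ip = src->ip; dst->sp = src->sp; }
    if ((both & CTX_INTEGER) == CTX_INTEGER) { dst->eax = src->eax; dst->ebx = src->ebx; }
}

int main() {
    ToyContext src{CTX_CONTROL | CTX_INTEGER, 0x1000, 0x2000, 7, 8};
    ToyContext dst{CTX_CONTROL, 0, 0, 0, 0};  // caller only asked for control state
    CopyContext(&dst, &src);
    std::printf("ip=%#x sp=%#x eax=%u\n",
                (unsigned)dst.ip, (unsigned)dst.sp, (unsigned)dst.eax); // eax stays 0
}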
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/comdependenthandle.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: COMDependentHandle.cpp // // // FCall's for the DependentHandle class // // Handle functions require cooperative mode, making these fcalls poor candidates for QCall conversion. // #include "common.h" #include "comdependenthandle.h" FCIMPL2(OBJECTHANDLE, DependentHandle::InternalInitialize, Object *_target, Object *_dependent) { FCALL_CONTRACT; OBJECTREF target(_target); OBJECTREF dependent(_dependent); OBJECTHANDLE result = NULL; HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL(); // Create the handle. result = GetAppDomain()->CreateDependentHandle(target, dependent); HELPER_METHOD_FRAME_END_POLL(); return result; } FCIMPLEND FCIMPL1(Object*, DependentHandle::InternalGetTarget, OBJECTHANDLE handle) { FCALL_CONTRACT; FCUnique(0x54); _ASSERTE(handle != NULL); return OBJECTREFToObject(ObjectFromHandle(handle)); } FCIMPLEND FCIMPL1(Object*, DependentHandle::InternalGetDependent, OBJECTHANDLE handle) { FCALL_CONTRACT; _ASSERTE(handle != NULL); OBJECTREF target = ObjectFromHandle(handle); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // The dependent is tracked only if target is non-null return (target != NULL) ? mgr->GetDependentHandleSecondary(handle) : NULL; } FCIMPLEND FCIMPL2(Object*, DependentHandle::InternalGetTargetAndDependent, OBJECTHANDLE handle, Object **outDependent) { FCALL_CONTRACT; _ASSERTE(handle != NULL && outDependent != NULL); OBJECTREF target = ObjectFromHandle(handle); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // The dependent is tracked only if target is non-null *outDependent = (target != NULL) ? mgr->GetDependentHandleSecondary(handle) : NULL; return OBJECTREFToObject(target); } FCIMPLEND FCIMPL1(VOID, DependentHandle::InternalSetTargetToNull, OBJECTHANDLE handle) { FCALL_CONTRACT; _ASSERTE(handle != NULL); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); mgr->StoreObjectInHandle(handle, NULL); } FCIMPLEND FCIMPL2(VOID, DependentHandle::InternalSetDependent, OBJECTHANDLE handle, Object *_dependent) { FCALL_CONTRACT; _ASSERTE(handle != NULL); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); mgr->SetDependentHandleSecondary(handle, _dependent); } FCIMPLEND FCIMPL1(VOID, DependentHandle::InternalFree, OBJECTHANDLE handle) { FCALL_CONTRACT; _ASSERTE(handle != NULL); HELPER_METHOD_FRAME_BEGIN_0(); DestroyDependentHandle(handle); HELPER_METHOD_FRAME_END(); } FCIMPLEND
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: COMDependentHandle.cpp // // // FCall's for the DependentHandle class // // Handle functions require cooperative mode, making these fcalls poor candidates for QCall conversion. // #include "common.h" #include "comdependenthandle.h" FCIMPL2(OBJECTHANDLE, DependentHandle::InternalInitialize, Object *_target, Object *_dependent) { FCALL_CONTRACT; OBJECTREF target(_target); OBJECTREF dependent(_dependent); OBJECTHANDLE result = NULL; HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL(); // Create the handle. result = GetAppDomain()->CreateDependentHandle(target, dependent); HELPER_METHOD_FRAME_END_POLL(); return result; } FCIMPLEND FCIMPL1(Object*, DependentHandle::InternalGetTarget, OBJECTHANDLE handle) { FCALL_CONTRACT; FCUnique(0x54); _ASSERTE(handle != NULL); return OBJECTREFToObject(ObjectFromHandle(handle)); } FCIMPLEND FCIMPL1(Object*, DependentHandle::InternalGetDependent, OBJECTHANDLE handle) { FCALL_CONTRACT; _ASSERTE(handle != NULL); OBJECTREF target = ObjectFromHandle(handle); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // The dependent is tracked only if target is non-null return (target != NULL) ? mgr->GetDependentHandleSecondary(handle) : NULL; } FCIMPLEND FCIMPL2(Object*, DependentHandle::InternalGetTargetAndDependent, OBJECTHANDLE handle, Object **outDependent) { FCALL_CONTRACT; _ASSERTE(handle != NULL && outDependent != NULL); OBJECTREF target = ObjectFromHandle(handle); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // The dependent is tracked only if target is non-null *outDependent = (target != NULL) ? mgr->GetDependentHandleSecondary(handle) : NULL; return OBJECTREFToObject(target); } FCIMPLEND FCIMPL1(VOID, DependentHandle::InternalSetTargetToNull, OBJECTHANDLE handle) { FCALL_CONTRACT; _ASSERTE(handle != NULL); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); mgr->StoreObjectInHandle(handle, NULL); } FCIMPLEND FCIMPL2(VOID, DependentHandle::InternalSetDependent, OBJECTHANDLE handle, Object *_dependent) { FCALL_CONTRACT; _ASSERTE(handle != NULL); IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); mgr->SetDependentHandleSecondary(handle, _dependent); } FCIMPLEND FCIMPL1(VOID, DependentHandle::InternalFree, OBJECTHANDLE handle) { FCALL_CONTRACT; _ASSERTE(handle != NULL); HELPER_METHOD_FRAME_BEGIN_0(); DestroyDependentHandle(handle); HELPER_METHOD_FRAME_END(); } FCIMPLEND
-1
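The comdependenthandle.cpp row above enforces the dependent-handle contract: the secondary (dependent) object is surfaced only while the primary target is still reachable. This is a minimal sketch of that lookup rule; ToyDependentHandle is hypothetical, since the real handle table and lifetime tracking live inside the GC.

// Sketch of dependent-handle semantics with an invented in-memory handle.
#include <cstdio>

struct Obj { const char* name; };

struct ToyDependentHandle {
    Obj* target;     // primary; conceptually a weak reference
    Obj* dependent;  // secondary; meaningful only while target is alive

    // Mirrors InternalGetTargetAndDependent: report the dependent only
    // when the target is still non-null.
    Obj* GetTargetAndDependent(Obj** outDependent) {
        *outDependent = (target != nullptr) ? dependent : nullptr;
        return target;
    }
};

int main() {
    Obj a{"target"}, b{"dependent"};
    ToyDependentHandle h{&a, &b};
    Obj* dep = nullptr;

    h.GetTargetAndDependent(&dep);
    std::printf("alive: dep=%s\n", dep ? dep->name : "(null)");

    h.target = nullptr;  // simulate the target being collected
    h.GetTargetAndDependent(&dep);
    std::printf("collected: dep=%s\n", dep ? dep->name : "(null)");
}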
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/vmholder.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __VMHOLDER_H_ #define __VMHOLDER_H_ #include "holder.h" template <typename TYPE> inline void DoTheReleaseHost(TYPE *value) { if (value) { value->Release(); } } template<typename _TYPE> using HostComHolder = SpecializedWrapper<_TYPE, DoTheReleaseHost<_TYPE>>; #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __VMHOLDER_H_ #define __VMHOLDER_H_ #include "holder.h" template <typename TYPE> inline void DoTheReleaseHost(TYPE *value) { if (value) { value->Release(); } } template<typename _TYPE> using HostComHolder = SpecializedWrapper<_TYPE, DoTheReleaseHost<_TYPE>>; #endif
-1
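The vmholder.h row above builds HostComHolder from SpecializedWrapper plus a release function that tolerates null. As a rough illustration of what such a holder provides, here is a self-contained RAII sketch; ToyHolder only approximates SpecializedWrapper, whose real implementation lives in holder.h.

// Sketch of a Release()-calling RAII holder, assuming a toy refcounted type.
#include <cstdio>

struct RefCounted {
    int refs = 1;
    void Release() { if (--refs == 0) { std::printf("released\n"); delete this; } }
};

// Same shape as DoTheReleaseHost: tolerate null so empty holders are cheap.
template <typename T>
void DoRelease(T* value) { if (value) value->Release(); }

template <typename T, void (*ReleaseFn)(T*)>
class ToyHolder {
    T* m_value;
public:
    explicit ToyHolder(T* value = nullptr) : m_value(value) {}
    ~ToyHolder() { ReleaseFn(m_value); }      // release exactly once, on scope exit
    ToyHolder(const ToyHolder&) = delete;     // sole ownership
    ToyHolder& operator=(const ToyHolder&) = delete;
    T* operator->() const { return m_value; }
};

int main() {
    ToyHolder<RefCounted, DoRelease<RefCounted>> holder(new RefCounted);
    // Release() runs automatically here, even if the scope exits via an exception.
}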
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/src/exception/seh.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: seh.cpp Abstract: Implementation of exception API functions. --*/ #include "pal/thread.hpp" #include "pal/handleapi.hpp" #include "pal/seh.hpp" #include "pal/dbgmsg.h" #include "pal/critsect.h" #include "pal/debug.h" #include "pal/init.h" #include "pal/process.h" #include "pal/malloc.hpp" #include "pal/signal.hpp" #include "pal/virtual.h" #if HAVE_MACH_EXCEPTIONS #include "machexception.h" #else #include <signal.h> #endif #include <string.h> #include <unistd.h> #include <pthread.h> #include <stdlib.h> #include <utility> using namespace CorUnix; SET_DEFAULT_DEBUG_CHANNEL(EXCEPT); /* Constant and type definitions **********************************************/ /* Bit 28 of exception codes is reserved. */ const UINT RESERVED_SEH_BIT = 0x800000; /* Internal variables definitions **********************************************/ PHARDWARE_EXCEPTION_HANDLER g_hardwareExceptionHandler = NULL; // Function to check if an activation can be safely injected at a specified context PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION g_safeExceptionCheckFunction = NULL; PGET_GCMARKER_EXCEPTION_CODE g_getGcMarkerExceptionCode = NULL; // Return address of the SEHProcessException, which is used to enable walking over // the signal handler trampoline on some Unixes where the libunwind cannot do that. void* g_SEHProcessExceptionReturnAddress = NULL; /* Internal function definitions **********************************************/ /*++ Function : SEHInitialize Initialize all SEH-related stuff (signals, etc) Parameters : CPalThread * pthrCurrent : reference to the current thread. PAL initialize flags Return value : TRUE if SEH support initialization succeeded FALSE otherwise --*/ BOOL SEHInitialize (CPalThread *pthrCurrent, DWORD flags) { if (!SEHInitializeSignals(pthrCurrent, flags)) { ERROR("SEHInitializeSignals failed!\n"); SEHCleanup(); return FALSE; } return TRUE; } /*++ Function : SEHCleanup Undo work done by SEHInitialize Parameters : None (no return value) --*/ VOID SEHCleanup() { TRACE("Cleaning up SEH\n"); SEHCleanupSignals(); } /*++ Function: PAL_SetHardwareExceptionHandler Register a hardware exception handler. Parameters: handler - exception handler Return value: None --*/ VOID PALAPI PAL_SetHardwareExceptionHandler( IN PHARDWARE_EXCEPTION_HANDLER exceptionHandler, IN PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION exceptionCheckFunction) { g_hardwareExceptionHandler = exceptionHandler; g_safeExceptionCheckFunction = exceptionCheckFunction; } /*++ Function: PAL_SetGetGcMarkerExceptionCode Register a function that determines if the specified IP has code that is a GC marker for GCCover. Parameters: getGcMarkerExceptionCode - the function to register Return value: None --*/ VOID PALAPI PAL_SetGetGcMarkerExceptionCode( IN PGET_GCMARKER_EXCEPTION_CODE getGcMarkerExceptionCode) { g_getGcMarkerExceptionCode = getGcMarkerExceptionCode; } EXTERN_C void ThrowExceptionFromContextInternal(CONTEXT* context, PAL_SEHException* ex); /*++ Function: PAL_ThrowExceptionFromContext This function creates a stack frame right below the target frame, restores all callee saved registers from the passed in context, sets the RSP to that frame and sets the return address to the target frame's RIP. Then it uses the ThrowExceptionHelper to throw the passed in exception from that context. 
Parameters: CONTEXT* context - context from which the exception will be thrown PAL_SEHException* ex - the exception to throw. --*/ VOID PALAPI PAL_ThrowExceptionFromContext(CONTEXT* context, PAL_SEHException* ex) { // We need to make a copy of the exception off stack, since the "ex" is located in one of the stack // frames that will become obsolete by the ThrowExceptionFromContextInternal and the ThrowExceptionHelper // could overwrite the "ex" object by stack e.g. when allocating the low level exception object for "throw". static __thread BYTE threadLocalExceptionStorage[sizeof(PAL_SEHException)]; ThrowExceptionFromContextInternal(context, new (threadLocalExceptionStorage) PAL_SEHException(std::move(*ex))); } /*++ Function: ThrowExceptionHelper Helper function to throw the passed in exception. It is called from the assembler function ThrowExceptionFromContextInternal Parameters: PAL_SEHException* ex - the exception to throw. --*/ extern "C" #ifdef HOST_X86 void __fastcall ThrowExceptionHelper(PAL_SEHException* ex) #else // HOST_X86 void ThrowExceptionHelper(PAL_SEHException* ex) #endif // !HOST_X86 { throw std::move(*ex); } /*++ Function: EnsureExceptionRecordsOnHeap Helper function to move records from stack to heap. Parameters: PAL_SEHException* exception --*/ static void EnsureExceptionRecordsOnHeap(PAL_SEHException* exception) { if( !exception->RecordsOnStack || exception->ExceptionPointers.ExceptionRecord == NULL ) { return; } CONTEXT* contextRecord = exception->ExceptionPointers.ContextRecord; EXCEPTION_RECORD* exceptionRecord = exception->ExceptionPointers.ExceptionRecord; CONTEXT* contextRecordCopy; EXCEPTION_RECORD* exceptionRecordCopy; AllocateExceptionRecords(&exceptionRecordCopy, &contextRecordCopy); *exceptionRecordCopy = *exceptionRecord; *contextRecordCopy = *contextRecord; exception->ExceptionPointers.ExceptionRecord = exceptionRecordCopy; exception->ExceptionPointers.ContextRecord = contextRecordCopy; exception->RecordsOnStack = false; } /*++ Function: SEHProcessException Send the PAL exception to any handler registered. Parameters: PAL_SEHException* exception Return value: Returns TRUE if the exception happened in managed code and the execution should continue (with possibly modified context). Returns FALSE if the exception happened in managed code and it was not handled. In case the exception was handled by calling a catch handler, it doesn't return at all. --*/ BOOL SEHProcessException(PAL_SEHException* exception) { g_SEHProcessExceptionReturnAddress = __builtin_return_address(0); CONTEXT* contextRecord = exception->GetContextRecord(); EXCEPTION_RECORD* exceptionRecord = exception->GetExceptionRecord(); if (!IsInDebugBreak(exceptionRecord->ExceptionAddress)) { if (g_hardwareExceptionHandler != NULL) { _ASSERTE(g_safeExceptionCheckFunction != NULL); // Check if it is safe to handle the hardware exception (the exception happened in managed code // or in a jitter helper or it is a debugger breakpoint) if (g_safeExceptionCheckFunction(contextRecord, exceptionRecord)) { EnsureExceptionRecordsOnHeap(exception); if (g_hardwareExceptionHandler(exception)) { // The exception happened in managed code and the execution should continue. return TRUE; } // The exception was a single step or a breakpoint and it was not handled by the debugger. 
} } if (CatchHardwareExceptionHolder::IsEnabled()) { EnsureExceptionRecordsOnHeap(exception); PAL_ThrowExceptionFromContext(exception->GetContextRecord(), exception); } } // Unhandled hardware exception pointers->ExceptionRecord->ExceptionCode at pointers->ExceptionRecord->ExceptionAddress return FALSE; } /*++ Function : SEHEnable Enable SEH-related stuff on this thread Parameters: CPalThread * pthrCurrent : reference to the current thread. Return value : TRUE if enabling succeeded FALSE otherwise --*/ extern "C" PAL_ERROR SEHEnable(CPalThread *pthrCurrent) { #if HAVE_MACH_EXCEPTIONS return pthrCurrent->EnableMachExceptions(); #elif defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__sun) return NO_ERROR; #else// HAVE_MACH_EXCEPTIONS #error not yet implemented #endif // HAVE_MACH_EXCEPTIONS } /*++ Function : SEHDisable Disable SEH-related stuff on this thread Parameters: CPalThread * pthrCurrent : reference to the current thread. Return value : TRUE if enabling succeeded FALSE otherwise --*/ extern "C" PAL_ERROR SEHDisable(CPalThread *pthrCurrent) { #if HAVE_MACH_EXCEPTIONS return pthrCurrent->DisableMachExceptions(); #elif defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__sun) return NO_ERROR; #else // HAVE_MACH_EXCEPTIONS #error not yet implemented #endif // HAVE_MACH_EXCEPTIONS } /*++ CatchHardwareExceptionHolder implementation --*/ extern "C" void PALAPI PAL_CatchHardwareExceptionHolderEnter() { CPalThread *pThread = InternalGetCurrentThread(); pThread->IncrementHardwareExceptionHolderCount(); } extern "C" void PALAPI PAL_CatchHardwareExceptionHolderExit() { CPalThread *pThread = InternalGetCurrentThread(); pThread->DecrementHardwareExceptionHolderCount(); } bool CatchHardwareExceptionHolder::IsEnabled() { CPalThread *pThread = GetCurrentPalThread(); return pThread ? pThread->IsHardwareExceptionsEnabled() : false; } /*++ NativeExceptionHolderBase implementation --*/ #if defined(__GNUC__) static __thread #else // __GNUC__ __declspec(thread) static #endif // !__GNUC__ NativeExceptionHolderBase *t_nativeExceptionHolderHead = nullptr; extern "C" NativeExceptionHolderBase ** PAL_GetNativeExceptionHolderHead() { return &t_nativeExceptionHolderHead; } NativeExceptionHolderBase * NativeExceptionHolderBase::FindNextHolder(NativeExceptionHolderBase *currentHolder, PVOID stackLowAddress, PVOID stackHighAddress) { NativeExceptionHolderBase *holder = (currentHolder == nullptr) ? t_nativeExceptionHolderHead : currentHolder->m_next; while (holder != nullptr) { if (((void *)holder >= stackLowAddress) && ((void *)holder < stackHighAddress)) { return holder; } // Get next holder holder = holder->m_next; } return nullptr; } #include "seh-unwind.cpp"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: seh.cpp Abstract: Implementation of exception API functions. --*/ #include "pal/thread.hpp" #include "pal/handleapi.hpp" #include "pal/seh.hpp" #include "pal/dbgmsg.h" #include "pal/critsect.h" #include "pal/debug.h" #include "pal/init.h" #include "pal/process.h" #include "pal/malloc.hpp" #include "pal/signal.hpp" #include "pal/virtual.h" #if HAVE_MACH_EXCEPTIONS #include "machexception.h" #else #include <signal.h> #endif #include <string.h> #include <unistd.h> #include <pthread.h> #include <stdlib.h> #include <utility> using namespace CorUnix; SET_DEFAULT_DEBUG_CHANNEL(EXCEPT); /* Constant and type definitions **********************************************/ /* Bit 28 of exception codes is reserved. */ const UINT RESERVED_SEH_BIT = 0x800000; /* Internal variables definitions **********************************************/ PHARDWARE_EXCEPTION_HANDLER g_hardwareExceptionHandler = NULL; // Function to check if an activation can be safely injected at a specified context PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION g_safeExceptionCheckFunction = NULL; PGET_GCMARKER_EXCEPTION_CODE g_getGcMarkerExceptionCode = NULL; // Return address of the SEHProcessException, which is used to enable walking over // the signal handler trampoline on some Unixes where the libunwind cannot do that. void* g_SEHProcessExceptionReturnAddress = NULL; /* Internal function definitions **********************************************/ /*++ Function : SEHInitialize Initialize all SEH-related stuff (signals, etc) Parameters : CPalThread * pthrCurrent : reference to the current thread. PAL initialize flags Return value : TRUE if SEH support initialization succeeded FALSE otherwise --*/ BOOL SEHInitialize (CPalThread *pthrCurrent, DWORD flags) { if (!SEHInitializeSignals(pthrCurrent, flags)) { ERROR("SEHInitializeSignals failed!\n"); SEHCleanup(); return FALSE; } return TRUE; } /*++ Function : SEHCleanup Undo work done by SEHInitialize Parameters : None (no return value) --*/ VOID SEHCleanup() { TRACE("Cleaning up SEH\n"); SEHCleanupSignals(); } /*++ Function: PAL_SetHardwareExceptionHandler Register a hardware exception handler. Parameters: handler - exception handler Return value: None --*/ VOID PALAPI PAL_SetHardwareExceptionHandler( IN PHARDWARE_EXCEPTION_HANDLER exceptionHandler, IN PHARDWARE_EXCEPTION_SAFETY_CHECK_FUNCTION exceptionCheckFunction) { g_hardwareExceptionHandler = exceptionHandler; g_safeExceptionCheckFunction = exceptionCheckFunction; } /*++ Function: PAL_SetGetGcMarkerExceptionCode Register a function that determines if the specified IP has code that is a GC marker for GCCover. Parameters: getGcMarkerExceptionCode - the function to register Return value: None --*/ VOID PALAPI PAL_SetGetGcMarkerExceptionCode( IN PGET_GCMARKER_EXCEPTION_CODE getGcMarkerExceptionCode) { g_getGcMarkerExceptionCode = getGcMarkerExceptionCode; } EXTERN_C void ThrowExceptionFromContextInternal(CONTEXT* context, PAL_SEHException* ex); /*++ Function: PAL_ThrowExceptionFromContext This function creates a stack frame right below the target frame, restores all callee saved registers from the passed in context, sets the RSP to that frame and sets the return address to the target frame's RIP. Then it uses the ThrowExceptionHelper to throw the passed in exception from that context. 
Parameters: CONTEXT* context - context from which the exception will be thrown PAL_SEHException* ex - the exception to throw. --*/ VOID PALAPI PAL_ThrowExceptionFromContext(CONTEXT* context, PAL_SEHException* ex) { // We need to make a copy of the exception off stack, since the "ex" is located in one of the stack // frames that will become obsolete by the ThrowExceptionFromContextInternal and the ThrowExceptionHelper // could overwrite the "ex" object by stack e.g. when allocating the low level exception object for "throw". static __thread BYTE threadLocalExceptionStorage[sizeof(PAL_SEHException)]; ThrowExceptionFromContextInternal(context, new (threadLocalExceptionStorage) PAL_SEHException(std::move(*ex))); } /*++ Function: ThrowExceptionHelper Helper function to throw the passed in exception. It is called from the assembler function ThrowExceptionFromContextInternal Parameters: PAL_SEHException* ex - the exception to throw. --*/ extern "C" #ifdef HOST_X86 void __fastcall ThrowExceptionHelper(PAL_SEHException* ex) #else // HOST_X86 void ThrowExceptionHelper(PAL_SEHException* ex) #endif // !HOST_X86 { throw std::move(*ex); } /*++ Function: EnsureExceptionRecordsOnHeap Helper function to move records from stack to heap. Parameters: PAL_SEHException* exception --*/ static void EnsureExceptionRecordsOnHeap(PAL_SEHException* exception) { if( !exception->RecordsOnStack || exception->ExceptionPointers.ExceptionRecord == NULL ) { return; } CONTEXT* contextRecord = exception->ExceptionPointers.ContextRecord; EXCEPTION_RECORD* exceptionRecord = exception->ExceptionPointers.ExceptionRecord; CONTEXT* contextRecordCopy; EXCEPTION_RECORD* exceptionRecordCopy; AllocateExceptionRecords(&exceptionRecordCopy, &contextRecordCopy); *exceptionRecordCopy = *exceptionRecord; *contextRecordCopy = *contextRecord; exception->ExceptionPointers.ExceptionRecord = exceptionRecordCopy; exception->ExceptionPointers.ContextRecord = contextRecordCopy; exception->RecordsOnStack = false; } /*++ Function: SEHProcessException Send the PAL exception to any handler registered. Parameters: PAL_SEHException* exception Return value: Returns TRUE if the exception happened in managed code and the execution should continue (with possibly modified context). Returns FALSE if the exception happened in managed code and it was not handled. In case the exception was handled by calling a catch handler, it doesn't return at all. --*/ BOOL SEHProcessException(PAL_SEHException* exception) { g_SEHProcessExceptionReturnAddress = __builtin_return_address(0); CONTEXT* contextRecord = exception->GetContextRecord(); EXCEPTION_RECORD* exceptionRecord = exception->GetExceptionRecord(); if (!IsInDebugBreak(exceptionRecord->ExceptionAddress)) { if (g_hardwareExceptionHandler != NULL) { _ASSERTE(g_safeExceptionCheckFunction != NULL); // Check if it is safe to handle the hardware exception (the exception happened in managed code // or in a jitter helper or it is a debugger breakpoint) if (g_safeExceptionCheckFunction(contextRecord, exceptionRecord)) { EnsureExceptionRecordsOnHeap(exception); if (g_hardwareExceptionHandler(exception)) { // The exception happened in managed code and the execution should continue. return TRUE; } // The exception was a single step or a breakpoint and it was not handled by the debugger. 
} } if (CatchHardwareExceptionHolder::IsEnabled()) { EnsureExceptionRecordsOnHeap(exception); PAL_ThrowExceptionFromContext(exception->GetContextRecord(), exception); } } // Unhandled hardware exception pointers->ExceptionRecord->ExceptionCode at pointers->ExceptionRecord->ExceptionAddress return FALSE; } /*++ Function : SEHEnable Enable SEH-related stuff on this thread Parameters: CPalThread * pthrCurrent : reference to the current thread. Return value : TRUE if enabling succeeded FALSE otherwise --*/ extern "C" PAL_ERROR SEHEnable(CPalThread *pthrCurrent) { #if HAVE_MACH_EXCEPTIONS return pthrCurrent->EnableMachExceptions(); #elif defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__sun) return NO_ERROR; #else // HAVE_MACH_EXCEPTIONS #error not yet implemented #endif // HAVE_MACH_EXCEPTIONS } /*++ Function : SEHDisable Disable SEH-related stuff on this thread Parameters: CPalThread * pthrCurrent : reference to the current thread. Return value : TRUE if disabling succeeded FALSE otherwise --*/ extern "C" PAL_ERROR SEHDisable(CPalThread *pthrCurrent) { #if HAVE_MACH_EXCEPTIONS return pthrCurrent->DisableMachExceptions(); #elif defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__sun) return NO_ERROR; #else // HAVE_MACH_EXCEPTIONS #error not yet implemented #endif // HAVE_MACH_EXCEPTIONS } /*++ CatchHardwareExceptionHolder implementation --*/ extern "C" void PALAPI PAL_CatchHardwareExceptionHolderEnter() { CPalThread *pThread = InternalGetCurrentThread(); pThread->IncrementHardwareExceptionHolderCount(); } extern "C" void PALAPI PAL_CatchHardwareExceptionHolderExit() { CPalThread *pThread = InternalGetCurrentThread(); pThread->DecrementHardwareExceptionHolderCount(); } bool CatchHardwareExceptionHolder::IsEnabled() { CPalThread *pThread = GetCurrentPalThread(); return pThread ? pThread->IsHardwareExceptionsEnabled() : false; } /*++ NativeExceptionHolderBase implementation --*/ #if defined(__GNUC__) static __thread #else // __GNUC__ __declspec(thread) static #endif // !__GNUC__ NativeExceptionHolderBase *t_nativeExceptionHolderHead = nullptr; extern "C" NativeExceptionHolderBase ** PAL_GetNativeExceptionHolderHead() { return &t_nativeExceptionHolderHead; } NativeExceptionHolderBase * NativeExceptionHolderBase::FindNextHolder(NativeExceptionHolderBase *currentHolder, PVOID stackLowAddress, PVOID stackHighAddress) { NativeExceptionHolderBase *holder = (currentHolder == nullptr) ? t_nativeExceptionHolderHead : currentHolder->m_next; while (holder != nullptr) { if (((void *)holder >= stackLowAddress) && ((void *)holder < stackHighAddress)) { return holder; } // Get next holder holder = holder->m_next; } return nullptr; } #include "seh-unwind.cpp"
-1
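The seh.cpp record above pairs PAL_CatchHardwareExceptionHolderEnter and PAL_CatchHardwareExceptionHolderExit around a per-thread holder count that CatchHardwareExceptionHolder::IsEnabled consults before SEHProcessException rethrows through PAL_ThrowExceptionFromContext. A minimal standalone sketch of that enter/exit counting pattern, assuming only a standard C++ toolchain; the names HardwareExceptionGuard and t_holderCount are hypothetical stand-ins, not PAL APIs:

#include <cstdio>

// Per-thread count of live holders; stands in for the counter kept on CPalThread.
static thread_local int t_holderCount = 0;

// RAII guard: the constructor plays the role of PAL_CatchHardwareExceptionHolderEnter,
// the destructor that of PAL_CatchHardwareExceptionHolderExit.
struct HardwareExceptionGuard
{
    HardwareExceptionGuard()  { ++t_holderCount; }
    ~HardwareExceptionGuard() { --t_holderCount; }
};

// Analogue of CatchHardwareExceptionHolder::IsEnabled: hardware exceptions are
// convertible to C++ exceptions only while at least one holder is alive on this thread.
static bool IsEnabled() { return t_holderCount > 0; }

int main()
{
    printf("before scope: %d\n", IsEnabled() ? 1 : 0); // prints 0
    {
        HardwareExceptionGuard guard;                  // guarded region
        printf("inside scope: %d\n", IsEnabled() ? 1 : 0); // prints 1
    }
    printf("after scope: %d\n", IsEnabled() ? 1 : 0);  // prints 0
    return 0;
}

The RAII shape matters because the exit call must run even when the guarded region unwinds via an exception, which is exactly the situation SEHProcessException creates when it converts a hardware fault into a C++ throw.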
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/pal/tests/palsuite/c_runtime/vsprintf/test10/test10.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test10.c ** ** Purpose: Test #10 for the vsprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../vsprintf.h" /* * Notes: memcmp is used, as is strlen. */ PALTEST(c_runtime_vsprintf_test10_paltest_vsprintf_test10, "c_runtime/vsprintf/test10/paltest_vsprintf_test10") { int neg = -42; int pos = 42; INT64 l = 42; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoNumTest("foo %o", pos, "foo 52"); DoNumTest("foo %lo", 0xFFFF, "foo 177777"); DoNumTest("foo %ho", 0xFFFF, "foo 177777"); DoNumTest("foo %Lo", pos, "foo 52"); DoI64Test("foo %I64o", l, "42", "foo 52"); DoNumTest("foo %3o", pos, "foo 52"); DoNumTest("foo %-3o", pos, "foo 52 "); DoNumTest("foo %.1o", pos, "foo 52"); DoNumTest("foo %.3o", pos, "foo 052"); DoNumTest("foo %03o", pos, "foo 052"); DoNumTest("foo %#o", pos, "foo 052"); DoNumTest("foo %+o", pos, "foo 52"); DoNumTest("foo % o", pos, "foo 52"); DoNumTest("foo %+o", neg, "foo 37777777726"); DoNumTest("foo % o", neg, "foo 37777777726"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test10.c ** ** Purpose: Test #10 for the vsprintf function. ** ** **===================================================================*/ #include <palsuite.h> #include "../vsprintf.h" /* * Notes: memcmp is used, as is strlen. */ PALTEST(c_runtime_vsprintf_test10_paltest_vsprintf_test10, "c_runtime/vsprintf/test10/paltest_vsprintf_test10") { int neg = -42; int pos = 42; INT64 l = 42; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoNumTest("foo %o", pos, "foo 52"); DoNumTest("foo %lo", 0xFFFF, "foo 177777"); DoNumTest("foo %ho", 0xFFFF, "foo 177777"); DoNumTest("foo %Lo", pos, "foo 52"); DoI64Test("foo %I64o", l, "42", "foo 52"); DoNumTest("foo %3o", pos, "foo 52"); DoNumTest("foo %-3o", pos, "foo 52 "); DoNumTest("foo %.1o", pos, "foo 52"); DoNumTest("foo %.3o", pos, "foo 052"); DoNumTest("foo %03o", pos, "foo 052"); DoNumTest("foo %#o", pos, "foo 052"); DoNumTest("foo %+o", pos, "foo 52"); DoNumTest("foo % o", pos, "foo 52"); DoNumTest("foo %+o", neg, "foo 37777777726"); DoNumTest("foo % o", neg, "foo 37777777726"); PAL_Terminate(); return PASS; }
-1
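The vsprintf test10 record above asserts specific octal conversions. For quick reference, a small self-contained program (an illustrative sketch, not part of the palsuite; it assumes 32-bit int so that -42 renders as 37777777726) that reproduces the expected strings with plain printf:

#include <cstdio>

int main()
{
    int pos = 42;
    int neg = -42;
    printf("[%o]\n",   pos); // [52]           42 decimal == 52 octal
    printf("[%.3o]\n", pos); // [052]          precision zero-pads the digits
    printf("[%03o]\n", pos); // [052]          '0' flag zero-pads to width 3
    printf("[%#o]\n",  pos); // [052]          '#' adds the leading-0 prefix
    printf("[%-3o]\n", pos); // [52 ]          '-' left-justifies within width 3
    printf("[%o]\n",   neg); // [37777777726]  -42 viewed as a 32-bit unsigned value
    return 0;
}

Note that '+' and ' ' have no effect on %o because the argument is treated as unsigned, which is why the test expects "foo 52" for both "foo %+o" and "foo % o" with a positive value.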
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/ilasm/prebuilt/asmparse.cpp
/* * Created by Microsoft VCBU Internal YACC from "asmparse.y" */ #line 2 "asmparse.y" // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File asmparse.y // #include "ilasmpch.h" #include "grammar_before.cpp" #line 16 "asmparse.y" #define UNION 1 typedef union { CorRegTypeAttr classAttr; CorMethodAttr methAttr; CorFieldAttr fieldAttr; CorMethodImpl implAttr; CorEventAttr eventAttr; CorPropertyAttr propAttr; CorPinvokeMap pinvAttr; CorDeclSecurity secAct; CorFileFlags fileAttr; CorAssemblyFlags asmAttr; CorAssemblyFlags asmRefAttr; CorTypeAttr exptAttr; CorManifestResourceFlags manresAttr; double* float64; __int64* int64; __int32 int32; char* string; BinStr* binstr; Labels* labels; Instr* instr; // instruction opcode NVPair* pair; pTyParList typarlist; mdToken token; TypeDefDescr* tdd; CustomDescr* cad; unsigned short opcode; } YYSTYPE; # define ERROR_ 257 # define BAD_COMMENT_ 258 # define BAD_LITERAL_ 259 # define ID 260 # define DOTTEDNAME 261 # define QSTRING 262 # define SQSTRING 263 # define INT32 264 # define INT64 265 # define FLOAT64 266 # define HEXBYTE 267 # define TYPEDEF_T 268 # define TYPEDEF_M 269 # define TYPEDEF_F 270 # define TYPEDEF_TS 271 # define TYPEDEF_MR 272 # define TYPEDEF_CA 273 # define DCOLON 274 # define ELIPSIS 275 # define VOID_ 276 # define BOOL_ 277 # define CHAR_ 278 # define UNSIGNED_ 279 # define INT_ 280 # define INT8_ 281 # define INT16_ 282 # define INT32_ 283 # define INT64_ 284 # define FLOAT_ 285 # define FLOAT32_ 286 # define FLOAT64_ 287 # define BYTEARRAY_ 288 # define UINT_ 289 # define UINT8_ 290 # define UINT16_ 291 # define UINT32_ 292 # define UINT64_ 293 # define FLAGS_ 294 # define CALLCONV_ 295 # define MDTOKEN_ 296 # define OBJECT_ 297 # define STRING_ 298 # define NULLREF_ 299 # define DEFAULT_ 300 # define CDECL_ 301 # define VARARG_ 302 # define STDCALL_ 303 # define THISCALL_ 304 # define FASTCALL_ 305 # define CLASS_ 306 # define TYPEDREF_ 307 # define UNMANAGED_ 308 # define FINALLY_ 309 # define HANDLER_ 310 # define CATCH_ 311 # define FILTER_ 312 # define FAULT_ 313 # define EXTENDS_ 314 # define IMPLEMENTS_ 315 # define TO_ 316 # define AT_ 317 # define TLS_ 318 # define TRUE_ 319 # define FALSE_ 320 # define _INTERFACEIMPL 321 # define VALUE_ 322 # define VALUETYPE_ 323 # define NATIVE_ 324 # define INSTANCE_ 325 # define SPECIALNAME_ 326 # define FORWARDER_ 327 # define STATIC_ 328 # define PUBLIC_ 329 # define PRIVATE_ 330 # define FAMILY_ 331 # define FINAL_ 332 # define SYNCHRONIZED_ 333 # define INTERFACE_ 334 # define SEALED_ 335 # define NESTED_ 336 # define ABSTRACT_ 337 # define AUTO_ 338 # define SEQUENTIAL_ 339 # define EXPLICIT_ 340 # define ANSI_ 341 # define UNICODE_ 342 # define AUTOCHAR_ 343 # define IMPORT_ 344 # define ENUM_ 345 # define VIRTUAL_ 346 # define NOINLINING_ 347 # define AGGRESSIVEINLINING_ 348 # define NOOPTIMIZATION_ 349 # define AGGRESSIVEOPTIMIZATION_ 350 # define UNMANAGEDEXP_ 351 # define BEFOREFIELDINIT_ 352 # define STRICT_ 353 # define RETARGETABLE_ 354 # define WINDOWSRUNTIME_ 355 # define NOPLATFORM_ 356 # define METHOD_ 357 # define FIELD_ 358 # define PINNED_ 359 # define MODREQ_ 360 # define MODOPT_ 361 # define SERIALIZABLE_ 362 # define PROPERTY_ 363 # define TYPE_ 364 # define ASSEMBLY_ 365 # define FAMANDASSEM_ 366 # define FAMORASSEM_ 367 # define PRIVATESCOPE_ 368 # define HIDEBYSIG_ 369 # define NEWSLOT_ 370 # define RTSPECIALNAME_ 371 # define PINVOKEIMPL_ 372 # define _CTOR 373 # 
define _CCTOR 374 # define LITERAL_ 375 # define NOTSERIALIZED_ 376 # define INITONLY_ 377 # define REQSECOBJ_ 378 # define CIL_ 379 # define OPTIL_ 380 # define MANAGED_ 381 # define FORWARDREF_ 382 # define PRESERVESIG_ 383 # define RUNTIME_ 384 # define INTERNALCALL_ 385 # define _IMPORT 386 # define NOMANGLE_ 387 # define LASTERR_ 388 # define WINAPI_ 389 # define AS_ 390 # define BESTFIT_ 391 # define ON_ 392 # define OFF_ 393 # define CHARMAPERROR_ 394 # define INSTR_NONE 395 # define INSTR_VAR 396 # define INSTR_I 397 # define INSTR_I8 398 # define INSTR_R 399 # define INSTR_BRTARGET 400 # define INSTR_METHOD 401 # define INSTR_FIELD 402 # define INSTR_TYPE 403 # define INSTR_STRING 404 # define INSTR_SIG 405 # define INSTR_TOK 406 # define INSTR_SWITCH 407 # define _CLASS 408 # define _NAMESPACE 409 # define _METHOD 410 # define _FIELD 411 # define _DATA 412 # define _THIS 413 # define _BASE 414 # define _NESTER 415 # define _EMITBYTE 416 # define _TRY 417 # define _MAXSTACK 418 # define _LOCALS 419 # define _ENTRYPOINT 420 # define _ZEROINIT 421 # define _EVENT 422 # define _ADDON 423 # define _REMOVEON 424 # define _FIRE 425 # define _OTHER 426 # define _PROPERTY 427 # define _SET 428 # define _GET 429 # define _PERMISSION 430 # define _PERMISSIONSET 431 # define REQUEST_ 432 # define DEMAND_ 433 # define ASSERT_ 434 # define DENY_ 435 # define PERMITONLY_ 436 # define LINKCHECK_ 437 # define INHERITCHECK_ 438 # define REQMIN_ 439 # define REQOPT_ 440 # define REQREFUSE_ 441 # define PREJITGRANT_ 442 # define PREJITDENY_ 443 # define NONCASDEMAND_ 444 # define NONCASLINKDEMAND_ 445 # define NONCASINHERITANCE_ 446 # define _LINE 447 # define P_LINE 448 # define _LANGUAGE 449 # define _CUSTOM 450 # define INIT_ 451 # define _SIZE 452 # define _PACK 453 # define _VTABLE 454 # define _VTFIXUP 455 # define FROMUNMANAGED_ 456 # define CALLMOSTDERIVED_ 457 # define _VTENTRY 458 # define RETAINAPPDOMAIN_ 459 # define _FILE 460 # define NOMETADATA_ 461 # define _HASH 462 # define _ASSEMBLY 463 # define _PUBLICKEY 464 # define _PUBLICKEYTOKEN 465 # define ALGORITHM_ 466 # define _VER 467 # define _LOCALE 468 # define EXTERN_ 469 # define _MRESOURCE 470 # define _MODULE 471 # define _EXPORT 472 # define LEGACY_ 473 # define LIBRARY_ 474 # define X86_ 475 # define AMD64_ 476 # define ARM_ 477 # define ARM64_ 478 # define MARSHAL_ 479 # define CUSTOM_ 480 # define SYSSTRING_ 481 # define FIXED_ 482 # define VARIANT_ 483 # define CURRENCY_ 484 # define SYSCHAR_ 485 # define DECIMAL_ 486 # define DATE_ 487 # define BSTR_ 488 # define TBSTR_ 489 # define LPSTR_ 490 # define LPWSTR_ 491 # define LPTSTR_ 492 # define OBJECTREF_ 493 # define IUNKNOWN_ 494 # define IDISPATCH_ 495 # define STRUCT_ 496 # define SAFEARRAY_ 497 # define BYVALSTR_ 498 # define LPVOID_ 499 # define ANY_ 500 # define ARRAY_ 501 # define LPSTRUCT_ 502 # define IIDPARAM_ 503 # define IN_ 504 # define OUT_ 505 # define OPT_ 506 # define _PARAM 507 # define _OVERRIDE 508 # define WITH_ 509 # define NULL_ 510 # define HRESULT_ 511 # define CARRAY_ 512 # define USERDEFINED_ 513 # define RECORD_ 514 # define FILETIME_ 515 # define BLOB_ 516 # define STREAM_ 517 # define STORAGE_ 518 # define STREAMED_OBJECT_ 519 # define STORED_OBJECT_ 520 # define BLOB_OBJECT_ 521 # define CF_ 522 # define CLSID_ 523 # define VECTOR_ 524 # define _SUBSYSTEM 525 # define _CORFLAGS 526 # define ALIGNMENT_ 527 # define _IMAGEBASE 528 # define _STACKRESERVE 529 # define _TYPEDEF 530 # define _TEMPLATE 531 # define _TYPELIST 532 # define _MSCORLIB 
533 # define P_DEFINE 534 # define P_UNDEF 535 # define P_IFDEF 536 # define P_IFNDEF 537 # define P_ELSE 538 # define P_ENDIF 539 # define P_INCLUDE 540 # define CONSTRAINT_ 541 #define yyclearin yychar = -1 #define yyerrok yyerrflag = 0 #ifndef YYMAXDEPTH #define YYMAXDEPTH 150 #endif YYSTYPE yylval, yyval; #ifndef YYFARDATA #define YYFARDATA /*nothing*/ #endif #if ! defined YYSTATIC #define YYSTATIC /*nothing*/ #endif #if ! defined YYCONST #define YYCONST /*nothing*/ #endif #ifndef YYACT #define YYACT yyact #endif #ifndef YYPACT #define YYPACT yypact #endif #ifndef YYPGO #define YYPGO yypgo #endif #ifndef YYR1 #define YYR1 yyr1 #endif #ifndef YYR2 #define YYR2 yyr2 #endif #ifndef YYCHK #define YYCHK yychk #endif #ifndef YYDEF #define YYDEF yydef #endif #ifndef YYV #define YYV yyv #endif #ifndef YYS #define YYS yys #endif #ifndef YYLOCAL #define YYLOCAL #endif #ifndef YYR_T #define YYR_T int #endif typedef YYR_T yyr_t; #ifndef YYEXIND_T #define YYEXIND_T unsigned int #endif typedef YYEXIND_T yyexind_t; #ifndef YYOPTTIME #define YYOPTTIME 0 #endif # define YYERRCODE 256 #line 2062 "asmparse.y" #include "grammar_after.cpp" YYSTATIC YYCONST short yyexca[] = { #if !(YYOPTTIME) -1, 1, #endif 0, -1, -2, 0, #if !(YYOPTTIME) -1, 452, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 622, #endif 274, 555, 47, 555, -2, 230, #if !(YYOPTTIME) -1, 643, #endif 40, 310, 60, 310, -2, 555, #if !(YYOPTTIME) -1, 665, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 690, #endif 274, 555, 47, 555, -2, 516, #if !(YYOPTTIME) -1, 809, #endif 123, 235, -2, 555, #if !(YYOPTTIME) -1, 836, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 961, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 994, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 995, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1323, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1324, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1331, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1339, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1465, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1497, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1564, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1581, #endif 41, 538, -2, 311, }; # define YYNPROD 844 #if YYOPTTIME YYSTATIC YYCONST yyexind_t yyexcaind[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 54, 0, 0, 0, 0, 0, 0, 58, 0, 0, 0, 0, 0, 0, 0, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 78 }; #endif # define YYLAST 3922 YYSTATIC YYCONST short YYFARDATA YYACT[] = { 703, 1484, 
414, 1416, 1133, 640, 660, 191, 1482, 1485, 886, 1036, 971, 1483, 702, 788, 779, 885, 974, 729, 73, 75, 150, 625, 1521, 536, 1417, 792, 755, 190, 760, 757, 478, 176, 107, 972, 1146, 110, 106, 694, 1077, 275, 860, 604, 662, 599, 273, 78, 81, 219, 44, 24, 780, 262, 516, 214, 204, 7, 301, 188, 654, 76, 6, 991, 85, 5, 1569, 3, 1206, 1253, 220, 1125, 18, 1257, 115, 677, 1069, 264, 153, 1254, 307, 133, 74, 178, 179, 180, 181, 272, 1123, 218, 136, 10, 221, 300, 26, 137, 1070, 98, 278, 139, 217, 1124, 17, 202, 203, 581, 269, 74, 719, 716, 322, 265, 461, 113, 112, 700, 520, 939, 940, 352, 343, 88, 87, 353, 89, 462, 1255, 452, 1025, 268, 676, 338, 56, 537, 68, 1243, 1244, 305, 591, 88, 87, 357, 89, 277, 225, 327, 277, 368, 339, 1031, 342, 938, 361, 366, 185, 154, 1241, 1242, 98, 360, 359, 358, 277, 56, 88, 87, 345, 89, 656, 1573, 192, 782, 351, 783, 348, 365, 1537, 277, 86, 1039, 369, 310, 312, 314, 316, 318, 374, 277, 362, 198, 1038, 364, 699, 373, 199, 271, 200, 698, 1432, 84, 105, 379, 201, 417, 418, 363, 376, 1138, 1139, 450, 387, 451, 615, 88, 87, 186, 89, 388, 480, 195, 814, 1071, 663, 1505, 456, 767, 1279, 457, 1578, 999, 258, 473, 475, 416, 196, 1496, 484, 655, 481, 482, 470, 491, 468, 472, 471, 501, 346, 216, 495, 1329, 597, 1214, 192, 493, 441, 24, 833, 476, 479, 375, 433, 7, 1278, 56, 801, 813, 486, 432, 664, 74, 428, 492, 429, 483, 541, 436, 18, 641, 642, 487, 586, 376, 585, 584, 544, 56, 1354, 1355, 1356, 587, 941, 942, 267, 943, 435, 10, 154, 442, 26, 1337, 1336, 1335, 1334, 494, 791, 434, 17, 777, 542, 572, 74, 545, 714, 668, 575, 268, 576, 1249, 577, 1517, 499, 600, 862, 863, 864, 579, 580, 371, 370, 1009, 116, 511, 511, 528, 534, 108, 574, 372, 258, 571, 266, 549, 573, 192, 488, 367, 88, 87, 154, 89, 321, 1559, 601, 512, 512, 529, 535, 80, 79, 480, 198, 505, 152, 410, 1435, 199, 74, 200, 613, 1560, 348, 582, 583, 201, 74, 1130, 480, 46, 498, 481, 482, 375, 621, 88, 596, 459, 89, 624, 80, 79, 195, 1563, 607, 608, 609, 610, 481, 482, 1562, 606, 420, 614, 421, 422, 423, 196, 745, 1368, 612, 474, 611, 619, 620, 622, 485, 340, 341, 678, 1352, 639, 644, 74, 1561, 500, 88, 87, 1248, 89, 1128, 955, 56, 1153, 600, 595, 784, 135, 1154, 759, 649, 650, 354, 355, 356, 652, 635, 1507, 1536, 348, 884, 182, 643, 785, 56, 704, 1138, 1139, 666, 1408, 855, 321, 177, 88, 674, 1140, 89, 74, 669, 1531, 970, 1514, 685, 682, 951, 347, 566, 88, 1246, 46, 89, 74, 538, 1506, 1267, 569, 1528, 867, 1362, 747, 88, 87, 689, 89, 74, 671, 673, 1192, 1358, 1142, 696, 88, 87, 786, 89, 990, 989, 988, 588, 983, 982, 981, 980, 758, 978, 979, 105, 706, 987, 986, 985, 984, 537, 690, 1468, 977, 975, 651, 715, 1001, 1002, 1003, 1004, 88, 87, 692, 954, 177, 376, 693, 453, 155, 648, 705, 80, 79, 480, 683, 701, 546, 727, 707, 805, 61, 62, 47, 63, 709, 803, 710, 713, 1256, 861, 539, 460, 645, 481, 482, 718, 177, 1520, 647, 56, 1530, 88, 87, 225, 89, 74, 723, 1191, 659, 728, 646, 967, 730, 277, 1529, 724, 597, 725, 675, 976, 720, 80, 79, 1129, 762, 679, 680, 681, 413, 734, 82, 506, 1526, 56, 768, 769, 49, 50, 51, 52, 53, 54, 55, 74, 639, 1262, 1258, 1259, 1260, 1261, 74, 754, 1524, 601, 744, 733, 748, 749, 750, 1013, 98, 1011, 1012, 787, 543, 502, 72, 49, 50, 51, 52, 53, 54, 55, 74, 643, 74, 684, 1522, 477, 61, 62, 47, 63, 88, 87, 542, 89, 807, 808, 802, 71, 751, 752, 753, 812, 793, 821, 74, 514, 825, 819, 826, 822, 74, 695, 216, 70, 830, 1184, 1183, 1182, 1181, 156, 157, 158, 831, 804, 806, 74, 809, 74, 815, 480, 773, 774, 775, 790, 325, 841, 842, 823, 797, 69, 800, 818, 80, 79, 67, 377, 824, 832, 324, 481, 482, 348, 348, 854, 
88, 87, 225, 89, 834, 858, 88, 87, 865, 89, 1153, 375, 672, 66, 930, 1154, 627, 628, 629, 49, 50, 51, 52, 53, 54, 55, 192, 944, 945, 868, 56, 853, 1153, 277, 856, 88, 87, 1154, 89, 74, 857, 49, 50, 51, 52, 53, 54, 55, 601, 957, 600, 1457, 630, 631, 632, 950, 1076, 1072, 1073, 1074, 1075, 1455, 946, 152, 1344, 46, 382, 383, 384, 385, 111, 177, 80, 79, 852, 74, 88, 87, 993, 89, 859, 348, 773, 88, 87, 1021, 89, 1022, 1019, 74, 1453, 362, 963, 956, 960, 966, 1018, 1451, 932, 46, 933, 934, 935, 936, 937, 216, 823, 74, 1032, 1434, 593, 1035, 637, 276, 1343, 968, 823, 606, 766, 997, 696, 696, 496, 1026, 1044, 1020, 1425, 74, 441, 1007, 1016, 1424, 1422, 1407, 433, 1027, 962, 1049, 829, 1024, 1047, 432, 1029, 1028, 428, 517, 429, 1051, 1042, 436, 1006, 1014, 528, 74, 1405, 80, 79, 480, 840, 1045, 1046, 1395, 145, 973, 519, 1411, 670, 765, 435, 1005, 1015, 442, 1008, 1017, 529, 823, 1062, 481, 482, 434, 1057, 88, 87, 337, 89, 1393, 49, 50, 51, 52, 53, 54, 55, 592, 277, 636, 277, 1391, 326, 323, 56, 152, 1389, 1067, 1387, 1385, 277, 1383, 49, 50, 51, 52, 53, 54, 55, 1381, 1379, 1376, 1373, 962, 1371, 1367, 41, 43, 1351, 1327, 1209, 1208, 1056, 996, 1055, 1134, 88, 87, 1315, 89, 762, 1079, 1054, 1080, 155, 776, 63, 722, 46, 1053, 543, 1034, 1033, 1144, 828, 1150, 1252, 616, 504, 820, 513, 737, 1131, 508, 509, 618, 1136, 617, 1141, 578, 522, 527, 177, 565, 1065, 1251, 308, 455, 1313, 109, 63, 1137, 1197, 92, 1145, 1198, 1195, 1196, 964, 1316, 770, 1037, 520, 1311, 513, 521, 1349, 508, 509, 1309, 1187, 1143, 695, 695, 992, 1194, 1193, 145, 1207, 1190, 953, 1210, 1185, 1, 1418, 1189, 1199, 1200, 1201, 1202, 1179, 1177, 1175, 1066, 589, 1234, 1203, 1204, 1205, 1314, 49, 50, 51, 52, 53, 54, 55, 712, 348, 88, 87, 1217, 89, 626, 1312, 590, 1211, 1245, 1063, 1152, 1310, 1188, 1247, 152, 1218, 1239, 1173, 1171, 1169, 145, 1238, 1237, 1240, 1186, 49, 50, 51, 52, 53, 54, 55, 1180, 1178, 1176, 88, 87, 348, 89, 74, 1167, 1165, 205, 155, 525, 352, 1250, 711, 708, 353, 156, 157, 158, 1163, 192, 192, 192, 192, 1135, 634, 277, 1161, 277, 1127, 192, 192, 192, 357, 1174, 1172, 1170, 177, 591, 412, 378, 1433, 626, 1263, 192, 46, 1159, 187, 794, 97, 88, 87, 63, 89, 1157, 1430, 949, 1168, 1166, 1508, 1138, 1139, 524, 1155, 351, 526, 317, 1266, 527, 1280, 1164, 1284, 315, 1286, 1288, 1289, 1270, 1292, 1162, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1274, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1269, 1429, 313, 1160, 1317, 1318, 1291, 1320, 1293, 1428, 1319, 1158, 1287, 1285, 1290, 152, 1301, 311, 1419, 1236, 1156, 309, 1282, 308, 1060, 1322, 1212, 306, 1213, 308, 1059, 844, 1328, 746, 667, 1233, 1333, 352, 1332, 45, 94, 353, 49, 50, 51, 52, 53, 54, 55, 454, 328, 329, 330, 308, 415, 88, 87, 277, 89, 357, 156, 157, 158, 155, 591, 1558, 823, 1345, 308, 1338, 1347, 1348, 308, 352, 1331, 332, 1330, 353, 308, 56, 277, 152, 1272, 1353, 1357, 1147, 525, 277, 140, 1215, 351, 177, 952, 1360, 1283, 357, 948, 1023, 1359, 277, 827, 1281, 277, 138, 1350, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 1470, 839, 591, 1469, 838, 1361, 351, 817, 63, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 591, 1346, 524, 771, 764, 526, 1364, 258, 721, 796, 567, 117, 1409, 1410, 349, 1414, 1396, 1413, 352, 1571, 1415, 591, 772, 1043, 591, 591, 56, 1421, 1420, 931, 1583, 591, 1426, 331, 303, 333, 334, 335, 336, 1574, 357, 1152, 1572, 1565, 1541, 823, 1504, 1503, 88, 87, 1502, 89, 1412, 1472, 1467, 134, 155, 1464, 1341, 96, 1423, 1461, 104, 103, 102, 101, 1460, 99, 100, 105, 1456, 351, 156, 157, 158, 1454, 49, 50, 51, 52, 53, 
54, 55, 1452, 177, 1450, 1437, 1509, 1431, 1427, 258, 1581, 697, 1406, 1510, 1404, 1394, 1462, 1392, 1463, 1390, 1388, 354, 355, 356, 97, 352, 1386, 836, 56, 353, 1473, 1474, 1475, 88, 87, 1466, 89, 1471, 1384, 531, 1382, 155, 88, 87, 1380, 89, 1378, 357, 1377, 1375, 1374, 1488, 350, 1152, 1372, 1370, 1486, 56, 1369, 1366, 1489, 1494, 1487, 88, 1365, 1566, 89, 1342, 1340, 177, 1476, 1326, 1325, 1499, 1321, 1271, 46, 1498, 351, 1363, 1268, 1235, 1516, 56, 1126, 1523, 1525, 1527, 1064, 1523, 1525, 1527, 258, 1050, 206, 1534, 1525, 1048, 1041, 1040, 1532, 1538, 1501, 1539, 1535, 1540, 1533, 1519, 1030, 969, 959, 958, 947, 866, 1515, 1518, 1513, 849, 848, 846, 156, 157, 158, 449, 843, 193, 1511, 837, 194, 835, 816, 823, 795, 778, 742, 1523, 1525, 1527, 741, 740, 739, 354, 355, 356, 738, 736, 88, 735, 688, 89, 638, 198, 177, 570, 425, 424, 199, 344, 200, 46, 320, 1564, 1497, 1493, 201, 1492, 1491, 1490, 1570, 1465, 1459, 1458, 1568, 1449, 1448, 1447, 1446, 354, 355, 356, 1577, 195, 1575, 1445, 1580, 1579, 156, 157, 158, 1582, 1444, 1567, 1443, 1442, 1441, 1440, 196, 1439, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 1438, 1436, 823, 1315, 59, 523, 1313, 1576, 208, 259, 210, 228, 212, 213, 1311, 1309, 1339, 56, 88, 882, 1187, 89, 41, 43, 56, 1185, 876, 1179, 877, 878, 879, 46, 1177, 1175, 1173, 1171, 515, 1169, 1167, 61, 62, 47, 63, 1165, 1163, 1161, 1324, 354, 355, 356, 223, 1323, 1265, 96, 1264, 56, 104, 103, 102, 101, 46, 99, 100, 105, 222, 1078, 871, 872, 873, 1068, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 998, 1061, 1052, 46, 59, 995, 994, 426, 208, 259, 210, 228, 212, 213, 961, 851, 226, 224, 1058, 850, 847, 845, 41, 43, 732, 731, 717, 691, 687, 870, 874, 875, 686, 880, 665, 633, 881, 603, 530, 61, 62, 47, 63, 49, 50, 51, 52, 53, 54, 55, 223, 602, 354, 355, 356, 594, 568, 548, 547, 497, 419, 411, 386, 319, 222, 304, 518, 302, 510, 507, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 503, 36, 184, 93, 59, 33, 469, 467, 208, 259, 210, 228, 212, 213, 466, 465, 226, 224, 95, 464, 244, 463, 41, 43, 227, 243, 215, 209, 1098, 38, 30, 58, 32, 59, 207, 211, 887, 31, 1010, 61, 62, 47, 63, 49, 50, 51, 52, 53, 54, 55, 223, 41, 43, 1000, 439, 38, 30, 58, 32, 59, 869, 799, 431, 798, 222, 46, 430, 427, 61, 62, 47, 63, 46, 540, 270, 60, 35, 41, 43, 83, 29, 21, 57, 34, 37, 25, 16, 263, 15, 189, 14, 39, 40, 261, 61, 62, 47, 63, 13, 226, 224, 60, 35, 46, 260, 12, 11, 21, 9, 8, 37, 4, 2, 444, 234, 242, 241, 39, 40, 240, 444, 239, 238, 237, 236, 235, 49, 50, 51, 52, 53, 54, 55, 233, 232, 231, 230, 229, 114, 77, 42, 756, 658, 657, 1500, 299, 19, 20, 90, 22, 23, 48, 183, 27, 28, 49, 50, 51, 52, 53, 54, 55, 1151, 761, 789, 1273, 965, 1149, 1148, 605, 1479, 1478, 19, 20, 1477, 22, 23, 48, 1495, 27, 28, 49, 50, 51, 52, 53, 54, 55, 882, 1481, 1480, 1216, 1132, 598, 661, 876, 781, 877, 878, 879, 448, 91, 58, 32, 59, 1081, 743, 448, 65, 58, 32, 59, 64, 197, 445, 883, 0, 0, 0, 446, 0, 445, 41, 43, 929, 0, 446, 0, 0, 41, 43, 0, 0, 0, 0, 871, 872, 873, 0, 61, 62, 47, 63, 1109, 437, 438, 61, 62, 47, 63, 0, 437, 438, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1085, 1086, 447, 1093, 1107, 1087, 1088, 1089, 1090, 447, 1091, 1092, 0, 1108, 1094, 1095, 1096, 1097, 63, 870, 874, 875, 0, 880, 0, 0, 881, 0, 532, 0, 0, 533, 0, 0, 0, 0, 0, 443, 440, 0, 0, 0, 0, 0, 443, 440, 0, 0, 0, 0, 0, 882, 0, 0, 0, 0, 193, 0, 876, 194, 877, 878, 879, 0, 49, 50, 51, 52, 53, 54, 55, 49, 50, 51, 52, 53, 54, 55, 0, 0, 0, 0, 198, 177, 0, 0, 0, 199, 0, 200, 0, 0, 0, 0, 0, 201, 901, 0, 871, 872, 873, 0, 49, 
50, 51, 52, 53, 54, 55, 146, 928, 0, 195, 0, 0, 893, 894, 0, 902, 919, 895, 896, 897, 898, 0, 899, 900, 196, 920, 903, 904, 905, 906, 0, 0, 0, 0, 348, 0, 0, 0, 0, 0, 0, 870, 874, 875, 0, 880, 0, 0, 881, 1232, 1231, 1226, 0, 1225, 1224, 1223, 1222, 0, 1220, 1221, 105, 0, 1230, 1229, 1228, 1227, 0, 0, 0, 0, 917, 1219, 921, 0, 990, 989, 988, 923, 983, 982, 981, 980, 0, 978, 979, 105, 0, 987, 986, 985, 984, 0, 0, 925, 977, 975, 0, 1542, 0, 0, 0, 0, 0, 0, 1083, 1084, 0, 1099, 1100, 1101, 0, 1102, 1103, 0, 0, 1104, 1105, 0, 1106, 0, 0, 0, 0, 0, 0, 0, 926, 0, 0, 0, 0, 1082, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 901, 0, 0, 976, 0, 0, 1512, 0, 0, 0, 0, 0, 0, 0, 928, 0, 0, 0, 0, 893, 894, 0, 902, 919, 895, 896, 897, 898, 0, 899, 900, 0, 920, 903, 904, 905, 906, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, 888, 0, 889, 890, 891, 892, 907, 908, 909, 924, 910, 911, 912, 913, 914, 915, 916, 918, 922, 917, 0, 921, 927, 0, 0, 0, 923, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 925, 168, 169, 0, 0, 171, 172, 173, 174, 564, 1557, 152, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 1547, 0, 0, 0, 0, 0, 0, 0, 0, 926, 0, 0, 0, 143, 144, 149, 1543, 556, 0, 550, 551, 552, 553, 0, 0, 1552, 0, 0, 146, 0, 0, 0, 0, 352, 0, 0, 0, 772, 0, 1553, 1554, 1555, 1556, 0, 0, 0, 0, 0, 160, 0, 0, 0, 0, 0, 0, 357, 558, 559, 560, 561, 0, 0, 555, 0, 0, 0, 562, 563, 554, 0, 0, 1544, 1545, 1546, 1548, 1549, 1550, 1551, 0, 0, 0, 0, 0, 0, 0, 0, 623, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 888, 0, 889, 890, 891, 892, 907, 908, 909, 924, 910, 911, 912, 913, 914, 915, 916, 918, 922, 990, 989, 988, 927, 983, 982, 981, 980, 0, 978, 979, 105, 0, 987, 986, 985, 984, 0, 0, 0, 977, 975, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 557, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 146, 0, 177, 142, 162, 352, 0, 0, 0, 353, 0, 0, 141, 147, 0, 976, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 357, 143, 144, 149, 0, 0, 175, 0, 0, 0, 0, 0, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 623, 1276, 162, 0, 0, 160, 159, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 1277, 0, 0, 0, 141, 147, 0, 0, 0, 0, 297, 198, 156, 157, 158, 0, 199, 0, 200, 1275, 143, 144, 149, 0, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, 352, 0, 195, 284, 353, 279, 280, 281, 282, 283, 63, 0, 0, 0, 287, 0, 160, 196, 354, 355, 356, 0, 357, 285, 0, 0, 0, 0, 295, 0, 286, 0, 641, 642, 0, 0, 0, 0, 0, 0, 0, 0, 288, 289, 290, 291, 292, 293, 294, 298, 0, 0, 0, 623, 0, 296, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 157, 158, 0, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 146, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 0, 354, 355, 356, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, 642, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 156, 157, 158, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 146, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 88, 87, 160, 89, 354, 355, 356, 0, 155, 146, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 157, 158, 0, 143, 144, 149, 0, 811, 274, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 146, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 160, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 810, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 156, 157, 158, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 159, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 274, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 156, 157, 158, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 160, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 763, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 156, 157, 158, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 160, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 146, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 156, 157, 158, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 653, 146, 171, 172, 173, 174, 0, 0, 177, 142, 162, 88, 87, 0, 89, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 0, 409, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 157, 158, 996, 0, 0, 146, 0, 0, 0, 0, 0, 0, 458, 0, 0, 0, 391, 0, 0, 0, 407, 0, 0, 389, 390, 0, 0, 0, 393, 394, 405, 395, 396, 397, 398, 399, 400, 401, 402, 392, 0, 0, 0, 0, 0, 0, 406, 0, 0, 404, 0, 0, 0, 0, 0, 0, 403, 0, 0, 146, 0, 0, 0, 726, 0, 408, 0, 0, 156, 157, 158, 489, 175, 490, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 380, 175, 381, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 0, 142, 162, 0, 0, 0, 160, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 0, 142, 162, 0, 0, 0, 160, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 160, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 0, 142, 162, 0, 0, 0, 0, 160, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160 }; YYSTATIC YYCONST short YYFARDATA YYPACT[] = { -1000, 1423,-1000, 609, 586,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 581, 555, 539, 514,-1000,-1000,-1000, 102, 
102, -466, 124, 124,-1000,-1000,-1000, 478,-1000, -115, 535,-1000, 907, 1099, 68, 903, 102, -355, -356,-1000, -139, 855, 68, 855,-1000,-1000,-1000, 172, 2319, 535, 535, 535, 535,-1000,-1000, 187,-1000,-1000,-1000, -164, 1074,-1000,-1000, 1825, 68, 68,-1000,-1000, 1368,-1000, -1000,-1000,-1000,-1000,-1000,-1000, 102, -121,-1000,-1000, -1000,-1000, 691, -120, 2983, 1193,-1000,-1000,-1000,-1000, 2436,-1000, 102,-1000, 1385,-1000, 1310, 1718, 68, 1169, 1163, 1159, 1144, 1120, 1114, 1716, 1518, 83,-1000, 102, 655, 878,-1000,-1000, 86, 1193, 535, 2983,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000, 1515, 185, 1288, 1061, -229, -230, -231, -238, 691,-1000, -101, 691, 1255, 312,-1000,-1000, 48, -1000, 3564, 239, 1081,-1000,-1000,-1000,-1000,-1000, 3394, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 504,-1000,-1000,-1000,-1000,-1000, 1193, 1715, 538, 1193, 1193, 1193,-1000, 3232, 123,-1000,-1000, 1714, 1066, 2884, -1000, 3564,-1000,-1000,-1000, 65, 65,-1000, 1713,-1000, -1000, 99, 1513, 1512, 1575, 1397,-1000,-1000, 102,-1000, 102, 87,-1000,-1000,-1000,-1000, 1173,-1000,-1000,-1000, -1000,-1000, 901, 102, 3193,-1000, 21, -69,-1000,-1000, 201, 102, 124, 610, 68, 201, 1255, 3339, 2983, -88, 65, 2884, 1712,-1000, 215,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 511, 545, 860, 1606,-1000, 100,-1000, 355, 691,-1000, -1000, 2983,-1000,-1000, 164, 1217, 65, 535,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000, 1711, 1710, 2114, 895, 349, 1284, 1709, 123, 1511, -48,-1000, 102, -48, -1000, 124,-1000, 102,-1000, 102,-1000, 102,-1000,-1000, -1000,-1000, 891,-1000, 102, 102,-1000, 1193,-1000,-1000, -1000, -369,-1000,-1000,-1000,-1000,-1000, 878, -47, 116, -1000,-1000, 1193, 999,-1000, 1299, 789, 1708,-1000, 170, 535, 157,-1000,-1000,-1000, 1704, 1690, 3564, 535, 535, 535, 535,-1000, 691,-1000,-1000, 3564, 228,-1000, 1193, -1000, -68,-1000, 1217, 879, 889, 887, 535, 535, 2721, -1000,-1000,-1000,-1000,-1000,-1000, 102, 1299, 1070,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000, 406,-1000,-1000,-1000, 1688, 1052,-1000, 791, 1508,-1000,-1000, 2580,-1000,-1000, 102, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 458, 446, 417,-1000,-1000,-1000,-1000,-1000, 102, 102, 402, 3124,-1000,-1000, -304, -196,-1000,-1000,-1000,-1000,-1000, -1000,-1000, -53, 1687,-1000, 102, 1158, 39, 65, 794, 640, 102,-1000, -69, 107, 107, 107, 107, 2983, 215, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000, 1685, 1681, 1506,-1000,-1000,-1000, 2721,-1000,-1000, -1000,-1000, 1299, 1680, 68, 3564,-1000, 201, 1285,-1000, -119, -124,-1000,-1000, -351,-1000,-1000, 68, 411, 454, 68,-1000,-1000, 1041,-1000,-1000, 68,-1000, 68,-1000, 1040, 991,-1000,-1000, 535, -157, -360, 1679,-1000,-1000, -1000,-1000, 535, -361,-1000,-1000, -346,-1000,-1000,-1000, 1282,-1000, 869, 535, 3564, 1193, 3510, 102, 108, 1181, -1000,-1000,-1000,-1000,-1000,-1000,-1000, 1678,-1000,-1000, -1000,-1000,-1000,-1000, 1677,-1000,-1000, 1385, 108, 1505, -1000, 1503, 883, 1502, 1498, 1497, 1496, 1492,-1000, 362, 1157,-1000, 97, 1193,-1000,-1000,-1000, 298, 535, 108, 388, 175, 3052,-1000,-1000, 1278, 1193,-1000, 793,-1000, -1000, -50, 2983, 2983, 943, 1277, 1217, 1193, 1193, 1193, 1193,-1000, 2418,-1000, 1193,-1000, 535, 535, 535, 867, 1193, 33, 1193, 494, 1491,-1000, 128,-1000,-1000,-1000, -1000,-1000,-1000, 102,-1000, 
1299,-1000,-1000, 1255, 30, 1076,-1000,-1000, 1193, 1490, 1202,-1000,-1000,-1000,-1000, -1000,-1000, -10, 65, 465, 459, 2983, 2816, -106, -47, 1488, 1265,-1000,-1000, 3510, -53, 881, 102, -96, 3564, 102, 1193, 102, 1238, 876,-1000,-1000,-1000, 201,-1000, -1000,-1000,-1000,-1000,-1000,-1000, 102, 124,-1000, -18, 1193, 108, 1487, 1386, 1485, 1262, 1259,-1000, 123, 102, 102, 1482, 1155,-1000,-1000, 1299, 1674, 1477, 1673, 1476, 1475, 1672, 1668, 1193, 535,-1000, 535, 102, 141, 535, 68, 2983, 535, 706, 1298, 81, -182, 1471, 95, 1795, 131, 1877, 102,-1000, 1306,-1000, 900,-1000, 900, 900, 900, 900, 900, -166,-1000, 102, 102, 535,-1000,-1000, -1000,-1000,-1000,-1000, 1193, 1470, 1234, 1083,-1000,-1000, 347, 1230, 964, 271, 166,-1000, 46, 102, 1469, 1468, -1000, 3564, 1667, 1081, 1081, 1081, 535, 535,-1000, 941, 542, 128,-1000,-1000,-1000,-1000,-1000, 1467, 343, 226, 958, -96, 1659, 1658, 3449,-1000,-1000, 1568, 104, 204, 690, -96, 3564, 102, 1193, 102, 1235, -322, 535, 1193, -1000,-1000, 3564,-1000,-1000, 1193,-1000, -53, 81, 1466, -241,-1000,-1000, 1193, 2721, 874, 873, 2983, 945, -126, -137, 1457, 1456, 535, 1300,-1000, -53,-1000, 201, 201, -1000,-1000,-1000,-1000, 411,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 1081, 1193, 1455, 102, 1193, 1451,-1000, 535, -96, 1655, 871, 864, 856, 854,-1000, 108, 1670,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 1154, 1148, 1654, 945, 123, 1446, 947, 68, 1639, -405, -56,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 495,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000, 1635, 1635,-1000, 1635, 1762,-1000, -1000, -408,-1000, -387,-1000,-1000, -429,-1000,-1000,-1000, 1442,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 123,-1000, -1000,-1000,-1000,-1000, 165, 331, 1193,-1000, 108, 829, 338,-1000, 3052, 374, 955,-1000,-1000,-1000,-1000,-1000, 1217, -53, 1081, 1193,-1000, 535, 1223, 2983,-1000,-1000, -1000, 393,-1000,-1000,-1000, 1111, 1102, 1094, 1075, 1067, 1055, 1054, 1033, 1032, 1031, 997, 996, 995, 399, 987, 975, 68, 455, 1076, -53, -53, 102, 938,-1000,-1000, -1000, 1255, 1255, 1255, 1255,-1000,-1000,-1000,-1000,-1000, -1000, 1255, 1255, 1255,-1000,-1000,-1000,-1000,-1000, -441, 2721, 853, 852, 2983,-1000, 1255, 1193, 1181,-1000, 123, -1000, 123, -23,-1000, 1227,-1000,-1000, 1913, 123, 102, -1000,-1000, 1193,-1000, 1439,-1000,-1000, 1143,-1000,-1000, -287, 998, 1877,-1000,-1000,-1000,-1000, 1299,-1000, -236, -257, 102,-1000,-1000,-1000,-1000, 383, 192, 108, 899, 880,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -434,-1000, -1000, 35,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000, 336,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 102,-1000,-1000,-1000,-1000, 1624, 1299, 1622,-1000,-1000, -1000,-1000,-1000, 359, 1438, 1223,-1000, 128, 1433, 1220, -1000, 2375,-1000,-1000,-1000, -37, 102, 977, 102, 1938, 102, 110, 102, 93, 102, 124, 102, 102, 102, 102, 102, 102, 102, 124, 102, 102, 102, 102, 102, 102, 102, 974, 968, 953, 913, 102, 102, -112, 102, 1432, 1299,-1000,-1000, 1621, 1616, 1430, 1429, 851,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000, 65, -25,-1000, 1214, -1000, 1216,-1000,-1000, -96, 2983,-1000,-1000, 1299,-1000, 1615, 1614, 1613, 1608, 1607, 1605, 18, 1604, 1603, 1602, 1597, 1595, 1590,-1000,-1000,-1000, 411,-1000, 1586, 1426, 1335,-1000,-1000,-1000,-1000, 1425,-1000, 740, 102,-1000, 1275, 102, 102, 950, 
108, 850,-1000,-1000,-1000,-1000, -1000,-1000,-1000, 155, 102, 5, 371,-1000,-1000,-1000, -1000,-1000, 2983, 395,-1000,-1000,-1000, 1172, 1422, 1417, 847, 144, 1416, 1413, 846, 1412, 844, 1408, 1407, 843, 1406, 1404, 842, 1402, 841, 1398, 833, 1396, 831, 1384, 830, 1378, 828, 1377, 823, 1375, 811, 1373, 787, 124, 102, 102, 102, 102, 102, 102, 102, 1372, 780, 1370, 759,-1000, 332, -53, -53,-1000,-1000, 822, 3564, -96, 2983, -53, 969,-1000, 1585, 1584, 1576, 1573, 1142, -53, -1000,-1000,-1000,-1000, 102, 758, 108, 757, 752, 102, 1299,-1000,-1000, 1366, 1133, 1125, 1085, 1365,-1000, 73, -1000, 1068, 735, 101,-1000,-1000,-1000, 1571, 1363,-1000, -1000, 1570,-1000, 1556,-1000,-1000, 1554,-1000,-1000, 1553, -1000, 1552,-1000, 1551,-1000, 1549,-1000, 1542,-1000, 1535, -1000, 1534,-1000, 1533,-1000, 1532, 1362, 723, 1360, 716, 1352, 687, 1347, 677,-1000, 1530,-1000, 1529,-1000, 1343, 1338,-1000, 2721, 969,-1000, 1334, 1528,-1000, 857, 411, 1331, 429,-1000, 1261,-1000, 2042, 1330,-1000, 102, 102, 102,-1000,-1000, 1938,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000, 1526,-1000, 1525,-1000, 1524,-1000, 1522,-1000,-1000, -1000,-1000, -39, 1521, 945, -53,-1000,-1000,-1000, 108, -1000, 947,-1000, 1327, 1324, 1323,-1000, 182, 1106, 2264, 428, 278, 527, 608, 582, 562, 443, 544, 530, 426, -1000,-1000,-1000,-1000, 405, 135, -96, -53,-1000, 1321, 2115, 1203,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 88,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 328, 381, 357, 350,-1000,-1000,-1000, 1520, 1320,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000, 1424, 108,-1000, -1000,-1000,-1000,-1000, -53, -443, 102, 1296, 1319, -188, 1316,-1000,-1000, 65,-1000, 3564, 2721, -46, -96, 969, 1369, -53, 1307,-1000 }; YYSTATIC YYCONST short YYFARDATA YYPGO[] = { 0, 33, 178, 5, 1991, 78, 39, 7, 1989, 0, 1988, 1984, 1982, 268, 80, 1981, 1977, 4, 1972, 52, 40, 3, 26, 32, 24, 6, 1970, 44, 41, 45, 1969, 38, 34, 10, 17, 11, 31, 1968, 42, 1967, 35, 18, 1966, 1965, 9, 1, 13, 8, 1954, 1950, 1947, 1946, 22, 27, 43, 1945, 1944, 1943, 1942, 15, 1941, 1940, 12, 1939, 30, 1938, 14, 36, 16, 23, 46, 2, 599, 59, 1236, 29, 106, 1928, 1924, 1921, 1920, 1919, 1918, 19, 28, 1917, 1329, 1916, 1915, 25, 789, 131, 1914, 50, 1221, 1913, 1912, 1911, 1910, 1909, 1901, 1900, 1899, 1898, 1897, 1895, 1892, 1891, 1890, 1028, 1888, 67, 56, 1887, 65, 134, 62, 55, 1885, 1884, 89, 1882, 1881, 1880, 1874, 1869, 1866, 53, 1864, 1863, 1862, 100, 70, 49, 1861, 92, 292, 1859, 1858, 1856, 1855, 1850, 1849, 1843, 1842, 1839, 1838, 1837, 1830, 832, 1829, 1814, 1813, 1812, 1811, 1810, 1803, 1802, 75, 1801, 1800, 125, 1797, 1796, 1795, 130, 1791, 1790, 1783, 1782, 1781, 1779, 1778, 58, 1760, 63, 1777, 54, 1776, 602, 1762, 1761, 1759, 1646, 1615, 1438 }; YYSTATIC YYCONST yyr_t YYFARDATA YYR1[]={ 0, 109, 109, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 136, 136, 36, 36, 133, 133, 133, 2, 2, 1, 1, 1, 9, 24, 24, 23, 23, 23, 134, 134, 134, 134, 134, 135, 135, 135, 135, 135, 135, 135, 135, 135, 93, 93, 93, 93, 94, 94, 94, 94, 10, 11, 73, 72, 72, 59, 61, 61, 61, 62, 62, 62, 65, 65, 132, 132, 132, 60, 60, 60, 60, 60, 60, 130, 130, 130, 119, 12, 12, 12, 12, 12, 12, 118, 137, 113, 138, 139, 111, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 140, 
140, 141, 141, 112, 112, 142, 142, 56, 56, 57, 57, 69, 69, 18, 18, 18, 18, 18, 19, 19, 68, 68, 67, 67, 58, 21, 21, 22, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 116, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 4, 4, 35, 35, 16, 16, 75, 75, 75, 75, 75, 75, 75, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 76, 74, 74, 74, 74, 74, 74, 144, 144, 81, 81, 81, 145, 145, 150, 150, 150, 150, 150, 150, 150, 150, 146, 82, 82, 82, 147, 147, 151, 151, 151, 151, 151, 151, 151, 152, 38, 38, 34, 34, 153, 114, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 3, 3, 3, 13, 13, 13, 13, 13, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 154, 115, 115, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 158, 159, 156, 161, 161, 160, 160, 160, 163, 162, 162, 162, 162, 166, 166, 166, 169, 164, 167, 168, 165, 165, 165, 117, 170, 170, 172, 172, 172, 171, 171, 173, 173, 14, 14, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 175, 31, 31, 32, 32, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 42, 42, 42, 43, 43, 43, 47, 47, 46, 46, 45, 45, 44, 44, 48, 48, 49, 49, 49, 50, 50, 50, 50, 51, 51, 149, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 6, 6, 6, 6, 6, 53, 53, 54, 54, 55, 55, 25, 25, 26, 26, 27, 27, 27, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 5, 5, 71, 71, 71, 71, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 20, 20, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 30, 30, 29, 29, 29, 29, 29, 131, 131, 131, 131, 131, 131, 64, 64, 64, 63, 63, 87, 87, 84, 84, 85, 17, 17, 37, 37, 37, 37, 37, 37, 37, 37, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 176, 176, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 121, 121, 88, 88, 89, 89, 177, 122, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 123, 123, 178, 178, 178, 66, 66, 179, 179, 179, 179, 179, 179, 180, 182, 181, 124, 124, 125, 125, 183, 183, 183, 183, 126, 148, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 127, 127, 184, 184, 184, 184, 184, 184, 184, 128, 128, 92, 92, 92, 129, 129, 185, 185, 185, 185 }; YYSTATIC YYCONST yyr_t YYFARDATA YYR2[]={ 0, 0, 2, 4, 4, 3, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 1, 1, 1, 2, 2, 3, 2, 2, 1, 1, 1, 4, 1, 0, 2, 1, 3, 2, 4, 6, 1, 1, 1, 1, 3, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 2, 3, 2, 2, 2, 1, 1, 2, 1, 2, 4, 6, 3, 5, 7, 9, 3, 4, 7, 1, 1, 1, 2, 0, 2, 2, 0, 6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 3, 1, 2, 3, 7, 0, 2, 2, 2, 2, 2, 3, 3, 2, 1, 4, 3, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 2, 2, 2, 5, 0, 2, 0, 2, 0, 2, 3, 1, 0, 1, 1, 3, 0, 3, 1, 1, 1, 1, 1, 0, 2, 4, 3, 
0, 2, 3, 0, 1, 5, 3, 4, 4, 4, 1, 1, 1, 1, 1, 2, 2, 4, 13, 22, 1, 1, 5, 3, 7, 5, 4, 7, 0, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 2, 2, 2, 5, 0, 2, 0, 2, 0, 3, 9, 9, 7, 7, 1, 1, 1, 2, 2, 1, 4, 0, 1, 1, 2, 2, 2, 2, 1, 4, 2, 5, 3, 2, 2, 1, 4, 3, 0, 2, 2, 0, 2, 2, 2, 2, 2, 1, 1, 1, 1, 9, 0, 2, 2, 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 4, 1, 3, 1, 13, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 8, 6, 5, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 5, 1, 1, 1, 0, 4, 4, 4, 4, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 1, 0, 2, 2, 1, 2, 4, 5, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 4, 6, 4, 4, 11, 1, 5, 3, 7, 5, 5, 3, 1, 2, 2, 1, 2, 4, 4, 1, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 4, 4, 2, 4, 2, 0, 1, 1, 3, 1, 3, 1, 0, 3, 5, 4, 3, 5, 5, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 2, 0, 1, 1, 2, 1, 1, 1, 1, 4, 4, 5, 4, 4, 4, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 7, 0, 2, 2, 0, 2, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 2, 0, 2, 3, 2, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 5, 3, 2, 2, 2, 2, 2, 5, 4, 6, 2, 4, 0, 3, 3, 1, 1, 0, 3, 0, 1, 1, 3, 0, 1, 1, 3, 1, 3, 4, 4, 4, 4, 5, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 4, 1, 0, 10, 6, 5, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 3, 4, 6, 5, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 4, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 5, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 3, 2, 3, 4, 2, 2, 2, 5, 5, 7, 4, 3, 2, 3, 2, 1, 1, 2, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 3, 0, 1, 1, 3, 2, 6, 7, 3, 3, 3, 6, 0, 1, 3, 5, 6, 4, 4, 1, 3, 3, 1, 1, 1, 1, 4, 1, 6, 6, 6, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 5, 4, 7, 6, 7, 6, 9, 8, 3, 8, 4, 0, 2, 0, 1, 3, 3, 0, 2, 2, 2, 3, 2, 2, 2, 2, 2, 0, 2, 3, 1, 1, 1, 1, 3, 8, 2, 3, 1, 1, 3, 3, 3, 4, 6, 0, 2, 3, 1, 3, 1, 4, 3, 0, 2, 2, 2, 3, 3, 3, 3, 3, 3, 0, 2, 2, 3, 3, 4, 2, 1, 1, 3, 5, 0, 2, 2, 0, 2, 4, 3, 1, 1 }; YYSTATIC YYCONST short YYFARDATA YYCHK[]={ -1000,-109,-110,-111,-113,-114,-116,-117,-118,-119, -120,-121,-122,-124,-126,-128,-130,-131,-132, 525, 526, 460, 528, 529,-133,-134,-135, 532, 533,-139, 409,-152, 411,-170,-137, 455,-176, 463, 408, 470, 471, 430, -87, 431, -93, -94, 273, 449, 530, 534, 535, 536, 537, 538, 539, 540, 59,-138, 410, 412, 454, 447, 448, 450, -10, -11, 123, 123,-115, 123, 123, 123, 123, -9, 264, -9, 527, -88, -24, 265, 264, -24, 123,-140, 314, -1, -2, 261, 260, 263, -78, -16, 91,-171, 123,-174, 278, 38,-175, 286, 287, 284, 283, 282, 281, 288, -31, -32, 267, 91, -9, -90, 469, 469, -92, -1, 469, -86, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, -31, -86, 263, -28, -70, -74, -93, -94, 306, 297, 322, 323,-149, 33, 307, 276, 324, -52, 275, 91, -5, -76, 268, 413, 414, 415, 358, 357, 278, 298, 277, 281, 282, 283, 284, 286, 287, 279, 290, 291, 292, 293, 271, -1, 296, -1, -1, -1, -1, 262, -77,-172, 318, 379, 61, -73, 40, -75, -7, -76, 269, 272, 325, 340, -8, 295, 300, 302, 308, -31, -31,-112,-109, 125,-155, 416,-156, 418,-154, 420, 421,-117,-157, -2,-131,-120,-133, -132,-135, 472, 458, 508,-158, 507,-160, 419, -95, -96, -97, -98, -99,-108,-100,-101,-102,-103,-104, -105,-106,-107,-159,-163, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 123, 417, -123,-125,-127,-129, -9, -1, 461,-136, -70, -76, -141, 315, -71, -70, 91, -28,-149, 46, -7, 328, 329, 330, 331, 332, 326, 346, 353, 337, 365, 366, 367, 368, 369, 
370, 371, 351, 378, 294, 372, -79, -9,-173,-174, 42, 40, -31, 40, -14, 91, 40, -14, 40, -14, 40, -14, 40, -14, 40, -14, 40, 41, 267, -9, 263, 58, 44, 262, -1, 354, 355, 356, 473, 379, 475, 476, 477, 478, -90, -91, -1, 329, 330, -1, -71, 41, -36, 61, 288, 262, 44, 390, 91, 38, 42, 359, 360, 361, 60, 390, 390, 390, 390, -70, 306, -70, -75, -7, 33, -9, -1, 280, 279, 289, -28, -1, -76, 42, 471, 47, -28, 270, 272, 281, 282, 283, 284, 40, -36, -1, 329, 330, 322, 345, 334, 335, 337, 338, 339, 340, 341, 342, 343, 344, 362, 355, 336, 352, 326, 371, 294, -2, 40, 61, -72, -71, -74, -28, -7, -7, 40, 301, 303, 304, 305, 41, 41, 125,-143,-114,-111, -144,-146,-116,-117,-131,-120,-132, 452, 453,-148, 508,-133,-135, 507, 321, 422, 427, 472, 408, 125, -9, -9, 40, 451, 58, 91, -9, -71, 357, 364, 541, 91,-161,-162,-164,-166,-167,-168, 311,-169, 309, 313, 312, -9, -2, -9, -24, 40, -23, -24, 266, 286, 287, -31, -9, -2, -75, -28, -76, 270, 272, -71, -36, 341,-175, -7, -72, 40,-115,-158, -2, -9, 125,-178, 462,-131,-179,-180, 467, 468, -181,-132,-135, 464, 125,-183,-177,-179,-182, 338, 462, 465, 125,-184, 460, 408, 463, 296,-132,-135, 125,-185, 460, 463,-132,-135, -89, 420, 125,-136, -142, -71, -1, 471, -7, -1, -13, 40, 40, -28, 328, 329, 330, 331, 377, 371, 326, 479, 365, 366, 367, 368, 375, 376, 294, 93, 125, 44, 40, -2, 41, -23, -9, -23, -24, -9, -9, -9, 93, -9, -9, 474, -1, -1, 330, 329, 327, 336, 390, 40, 61, 43, 123, 40, 40, 263, -1, 93, -30, -29, 275, -9, 40, 40, -54, -55, -28, -1, -1, -1, -1, -70, -28, -9, -1, 280, 93, 93, 93, -1, -1, -71, -1, 91, -9, -69, 60, 329, 330, 331, 365, 366, 367, 40, 61, -36, 123, 40, 41, -71, -3, 373, 374, -1, -9,-115, 123, 123, 123, -9, -9, 123, -71, 357, 364, 541, 364, -81, -82, -91, -25, -26, -27, 275, -13, 40, -9, 58, 274, -7, 91, -1, 91, -1, -9,-161,-165,-158, 310,-165, -165,-165, -71,-158, -2, -9, 40, 40, 41, -71, -1, 40, -31, -28, -6, -2, -9, 125, 316, 316, 466, -31, -66, -9, 42, -36, 61, -31, 61, -31, -31, 61, 61, -1, 469, -9, 469, 40, -1, 469, -177, 44, 93, -1, -28, -28, 91, -9, -36, -83, -1, 40, 40,-173, -36, 41, 41, 93, 41, 41, 41, 41, 41, -12, 263, 44, 58, 390, 329, 330, 331, 365, 366, 367, -1, -84, -85, -36, 123, 262, -64, -63, -71, 306, 44, 93, 44, 275, -71, -71, 62, 44, 42, -5, -5, -5, 93, 274, 41, -68, -19, -18, 43, 45, 306, 323, 373, -9, -59, -61, -73, 274, -53, -22, 60, 41, 125,-112,-145,-147, -127, 274, -7, 91, -1, 91, -1, -71, -71, -1, 371, 326, -7, 371, 326, -1, 41, 44, -28, -25, 93, -9, -3, -1, -28, -9, -9, 44, 93, -2, -9, -9, -24, 274, -36, 41, 40, 41, 44, 44, -2, -9, -9, 41, 58, 40, 41, 40, 41, 41, 40, 40, -5, -1, -9, 317, -1, -31, -71, 93, -38, 479, 504, 505, 506, -9, 41, 390, -83, 41, 387, 341, 342, 343, 388, 389, 301, 303, 304, 305, 391, 394, 294, -4, 317, -34, -33,-153, 480, 482, 483, 484, 485, 276, 277, 281, 282, 283, 284, 286, 287, 257, 279, 290, 291, 292, 293, 486, 487, 488, 490, 491, 492, 493, 494, 495, 496, 334, 497, 280, 289, 336, 498, 341, 489, 357, 390, 502, 271, 123, -9, 41, -14, -14, -14, -14, -14, -14, 317, 283, 284, 456, 457, 459, -9, -9, -1, 41, 44, 61, -59, 125, 44, 61, 263, 263, -29, -9, 41, 41, -28, 40, -5, -1, 62, -58, -1, 40, -19, 41, 125, -62, -40,-135, -41, 298, 364, 297, 286, 287, 284, 283, 282, 281, 293, 292, 291, 290, 279, 278, 277,-175, 61, -3, 40, 40, 91, -54, 125, 125, -150, 423, 424, 425, 426,-120,-132,-133,-135, 125, -151, 428, 429, 426,-132,-120,-133,-135, 125, -3, -28, -9, -9, 44, -93, 450, -1, -28, -27, -38, 41, 390, -71, 93, 93, -71, -35, 61, 316, 316, 41, 41, -1, 41, -25, -6, -6, -66, 41, -9, 41, -3, 40, 93, 93, 93, 
93, -36, 41, 58, 58, 40, -35, -2, 41, 42, 91, -32, 40, 481, 501, 277, 281, 282, 283, 284, 280, -20, 40, -20, -20, -15, 510, 483, 484, 276, 277, 281, 282, 283, 284, 286, 287, 279, 290, 291, 292, 293, 42, 486, 487, 488, 490, 491, 494, 495, 497, 280, 289, 257, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 496, 488, 500, 41, -2, 263, 263, 44, -84, -37, -17, -9, 283, -36, -70, 319, 320, 125, -64, 123, 61, -25, -1, -67, 44, -56, -57, -71, -65,-135, 358, 363, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 284, 283, 282, 281, 40, 91, 40, 91, -31, -36, 123, 40, -53, -22, -25, -25, -9, 62, -75, -75, -75, -75, -75, -75, -75, 509, -71, 93, 93, -71, -1, -2, -2, 274, 44, -39, -41, -36, 299, 286, 287, 284, 283, 282, 281, 279, 293, 292, 291, 290, 278, 277, -2, -9, 41, 58, -89, -69, -34, -83, 392, 393, 392, 393, -9, 93, -9, 43, 125, -36, 91, 91, 503, 44, 91, 524, 38, 281, 282, 283, 284, 280, -9, 40, 40, -62, 123, 41, -67, -68, 41, 44, -60, -52, 364, 297, 345, 299, 263, -9, 306, -70, 299, -9, -40, -9, -23, -9, -9, -23, -24, -9, -24, -9, -9, -9, -9, -9, -9, -9, -24, -9, -9, -9, -9, -9, -9, -9, 40, 91, 40, 91, 40, 91, 40, 91, -9, -9, -17, -9, 41, -59, 40, 40, 41, 41, 93, -7, 274, 44, 40, -3, -71, 284, 283, 282, 281, -66, 40, 41, 41, 41, 93, 43, -9, 44, -9, -9, 61, -36, 93, 263, -9, 281, 282, 283, -9, 125, -62, -71, -1, 91, 306, -70, 41, 41, 93, 263, 41, 41, 93, 41, 93, 41, 41, 93, 41, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, -24, -9, -9, -9, -9, -9, -9, -9, 41, 93, 41, 93, 125, -25, -25, 62, -28, -3, -71, -25, -21, -22, 60, 58, -25, -9, 93, -36, 93, 93, -9, 41, 58, 58, 58, 41, 125, 61, 93, 263, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 93, 41, 93, 41, 93, 41, 93, 40, 40, 41, 41, -71, -21, 41, 40, -66, 41, 93, 44, 41, -33, 41, -9, -9, -9, -40, -49, -50, -51, -42, -43, -47, -46, -45, -44, -47, -46, -45, -44, 40, 40, 40, 40, -45, -48, 274, 40, -35, -25, -80, -36, 41, 41, 41, 41, 299, 263, 41, 299, 306, -70, 41, -40, 41, -23, -9, 41, -23, -24, 41, -24, 41, -9, 41, -9, 41, -9, 41, 41, 41, 41, -47, -46, -45, -44, 41, 41, -17, -3, -25, 41, 123, 324, 379, 380, 381, 308, 382, 383, 384, 385, 333, 347, 348, 349, 350, 294, 44, 263, 41, 41, 41, 41, 40, 41, 40, -36, -25, 509, -9, 41, 41, 357, 41, -7, -28, -71, 274, -3, -21, 40, -25, 41 }; YYSTATIC YYCONST short YYFARDATA YYDEF[]={ 1, -2, 2, 0, 0, 333, 6, 7, 8, 9, 10, 11, 0, 0, 0, 0, 16, 17, 18, 0, 0, 772, 0, 0, 24, 25, 26, 0, 28, 135, 0, 269, 206, 0, 431, 0, 0, 778, 105, 835, 92, 0, 431, 0, 83, 84, 85, 0, 0, 0, 0, 0, 0, 57, 58, 0, 60, 108, 262, 387, 0, 757, 758, 219, 431, 431, 139, 1, 0, 788, 806, 824, 838, 19, 41, 20, 0, 0, 22, 42, 43, 23, 29, 137, 0, 104, 38, 39, 36, 37, 219, 186, 0, 384, 0, 391, 0, 0, 431, 394, 394, 394, 394, 394, 394, 0, 0, 432, 433, 0, 760, 0, 778, 814, 0, 93, 0, 0, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 0, 0, 33, 0, 0, 0, 0, 0, 0, 668, 0, 0, 219, 0, 684, 685, 0, 689, 0, 0, 549, 233, 551, 552, 553, 554, 0, 489, 691, 692, 693, 694, 695, 696, 697, 698, 699, 0, 704, 705, 706, 707, 708, 555, 0, 52, 54, 55, 56, 59, 0, 386, 388, 389, 0, 61, 0, 71, 0, 212, 213, 214, 219, 219, 217, 0, 220, 221, 226, 0, 0, 0, 0, 5, 334, 0, 336, 0, 0, 340, 341, 342, 343, 0, 345, 346, 347, 348, 349, 0, 0, 0, 355, 0, 0, 332, 504, 0, 0, 0, 0, 431, 0, 219, 0, 0, 0, 219, 0, 0, 333, 0, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 362, 369, 0, 0, 0, 0, 21, 774, 773, 0, 29, 550, 107, 0, 136, 557, 0, 560, 
219, 0, 311, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 0, 0, 0, 0, 0, 393, 0, 0, 0, 0, 405, 0, 0, 406, 0, 407, 0, 408, 0, 409, 0, 410, 430, 102, 434, 0, 759, 0, 0, 769, 777, 779, 780, 781, 0, 783, 784, 785, 786, 787, 0, 0, 833, 836, 837, 94, 718, 719, 720, 0, 0, 31, 0, 0, 711, 673, 674, 675, 0, 0, 534, 0, 0, 0, 0, 667, 0, 670, 228, 0, 0, 681, 683, 686, 0, 688, 690, 0, 0, 0, 0, 0, 0, 231, 232, 700, 701, 702, 703, 0, 53, 147, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 0, 131, 132, 133, 0, 0, 103, 0, 0, 72, 73, 0, 215, 216, 0, 222, 223, 224, 225, 64, 68, 3, 140, 333, 0, 0, 0, 168, 169, 170, 171, 172, 0, 0, 0, 0, 178, 179, 0, 0, 236, 250, 814, 105, 4, 335, 337, -2, 0, 344, 0, 0, 0, 219, 0, 0, 0, 363, 365, 0, 0, 0, 0, 0, 0, 379, 380, 377, 505, 506, 507, 508, 503, 509, 510, 44, 0, 0, 0, 512, 513, 514, 0, 517, 518, 519, 520, 521, 0, 431, 0, 525, 527, 0, 366, 0, 0, 12, 789, 0, 791, 792, 431, 0, 0, 431, 799, 800, 0, 13, 807, 431, 809, 431, 811, 0, 0, 14, 825, 0, 0, 0, 0, 831, 832, 15, 839, 0, 0, 842, 843, 771, 775, 27, 30, 138, 142, 0, 0, 0, 40, 0, 0, 292, 0, 187, 188, 189, 190, 191, 192, 193, 0, 195, 196, 197, 198, 199, 200, 0, 207, 390, 0, 0, 0, 398, 0, 0, 0, 0, 0, 0, 0, 96, 762, 0, 782, 804, 812, 815, 816, 817, 0, 0, 0, 0, 0, 722, 727, 728, 34, 47, 671, 0, 709, 712, 713, 0, 0, 0, 535, 536, 48, 49, 50, 51, 669, 0, 680, 682, 687, 0, 0, 0, 0, 556, 0, -2, 711, 0, 106, 154, 125, 126, 127, 128, 129, 130, 0, 385, 62, 75, 69, 219, 0, 532, 308, 309, -2, 0, 0, 139, 239, 253, 173, 174, 824, 0, 219, 0, 0, 0, 0, 219, 0, 0, 539, 540, 542, 0, -2, 0, 0, 0, 0, 0, 357, 0, 0, 0, 364, 370, 381, 0, 371, 372, 373, 378, 374, 375, 376, 0, 0, 511, 0, -2, 0, 0, 0, 0, 530, 531, 361, 0, 0, 0, 0, 0, 793, 794, 797, 0, 0, 0, 0, 0, 0, 0, 826, 0, 830, 0, 0, 0, 0, 431, 0, 558, 0, 0, 263, 0, 0, 292, 0, 202, 561, 0, 392, 0, 397, 394, 395, 394, 394, 394, 394, 394, 0, 761, 0, 0, 0, 818, 819, 820, 821, 822, 823, 834, 0, 729, 0, 75, 32, 0, 723, 0, 0, 0, 672, 711, 715, 0, 0, 679, 0, 674, 545, 546, 547, 0, 0, 227, 0, 0, 154, 149, 150, 151, 152, 153, 0, 0, 78, 65, 0, 0, 0, 534, 218, 164, 0, 0, 0, 0, 0, 0, 0, 181, 0, 0, 0, 0, -2, 237, 238, 0, 251, 252, 813, 338, 311, 263, 0, 350, 352, 353, 310, 0, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 523, -2, 526, 527, 527, 367, 368, 790, 795, 0, 803, 798, 801, 808, 810, 776, 802, 827, 828, 0, 0, 841, 0, 141, 559, 0, 0, 0, 0, 0, 0, 288, 0, 0, 291, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 0, 0, 0, 204, 0, 0, 265, 0, 0, 0, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 0, 582, 583, 584, 585, 591, 592, 593, 594, 595, 596, 597, 616, 616, 600, 616, 618, 604, 606, 0, 608, 0, 610, 612, 0, 614, 615, 267, 0, 396, 399, 400, 401, 402, 403, 404, 0, 97, 98, 99, 100, 101, 764, 766, 805, 716, 0, 0, 0, 721, 722, 0, 37, 35, 710, 714, 676, 677, 537, -2, 548, 229, 148, 0, 158, 143, 155, 134, 63, 74, 76, 77, 438, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 431, 0, 532, -2, -2, 0, 0, 165, 166, 240, 219, 219, 219, 219, 245, 246, 247, 248, 167, 254, 219, 219, 219, 258, 259, 260, 261, 175, 0, 0, 0, 0, 0, 184, 219, 234, 0, 541, 543, 339, 0, 0, 356, 0, 359, 360, 0, 0, 0, 45, 46, 515, 522, 0, 528, 529, 0, 829, 840, 774, 147, 561, 312, 313, 314, 315, 292, 290, 0, 0, 0, 185, 203, 194, 586, 0, 0, 0, 0, 0, 611, 578, 579, 580, 581, 605, 598, 0, 599, 601, 602, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 0, 634, 635, 636, 637, 638, 642, 643, 644, 645, 646, 647, 648, 649, 650, 652, 653, 654, 655, 656, 657, 658, 659, 660, 
661, 662, 663, 664, 665, 666, 607, 609, 613, 201, 95, 763, 765, 0, 730, 731, 734, 735, 0, 737, 0, 732, 733, 717, 724, 78, 0, 0, 158, 157, 154, 0, 144, 145, 0, 80, 81, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66, 75, 70, 0, 0, 0, 0, 0, 533, 241, 242, 243, 244, 255, 256, 257, 219, 0, 180, 0, 183, 0, 544, 351, 0, 0, 205, 435, 436, 437, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 382, 383, 524, 0, 770, 0, 0, 0, 303, 304, 305, 306, 0, 587, 0, 0, 266, 0, 0, 0, 0, 0, 0, 640, 641, 630, 631, 632, 633, 651, 768, 0, 0, 0, 78, 678, 156, 159, 160, 0, 0, 86, 87, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 429, 0, -2, -2, 210, 211, 0, 0, 0, 0, -2, 161, 358, 0, 0, 0, 0, 0, -2, 264, 289, 307, 588, 0, 0, 0, 0, 0, 0, 603, 639, 767, 0, 0, 0, 0, 0, 725, 0, 146, 0, 0, 0, 90, 439, 440, 0, 0, 442, 443, 0, 444, 0, 411, 413, 0, 412, 414, 0, 415, 0, 416, 0, 417, 0, 418, 0, 423, 0, 424, 0, 425, 0, 426, 0, 0, 0, 0, 0, 0, 0, 0, 0, 427, 0, 428, 0, 67, 0, 0, 163, 0, 161, 182, 0, 0, 162, 0, 0, 0, 0, 590, 0, 564, 561, 0, 736, 0, 0, 0, 741, 726, 0, 91, 89, 480, 441, 483, 487, 464, 467, 470, 472, 474, 476, 470, 472, 474, 476, 419, 0, 420, 0, 421, 0, 422, 0, 474, 478, 208, 209, 0, 0, 204, -2, 796, 316, 589, 0, 563, 565, 617, 0, 0, 0, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 470, 472, 474, 476, 0, 0, 0, -2, 249, 0, 0, 0, 738, 739, 740, 461, 481, 482, 462, 484, 0, 486, 463, 488, 445, 465, 466, 446, 468, 469, 447, 471, 448, 473, 449, 475, 450, 477, 451, 452, 453, 454, 0, 0, 0, 0, 459, 460, 479, 0, 0, 354, 268, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 0, 0, 485, 455, 456, 457, 458, -2, 0, 0, 0, 0, 0, 0, 562, 176, 219, 331, 0, 0, 0, 0, 161, 0, -2, 0, 177 }; #ifdef YYRECOVER YYSTATIC YYCONST short yyrecover[] = { -1000 }; #endif /* SCCSWHAT( "@(#)yypars.c 3.1 88/11/16 22:00:49 " ) */ #line 3 "F:\\NetFXDev1\\src\\tools\\devdiv\\amd64\\yypars.c" #if ! defined(YYAPI_PACKAGE) /* ** YYAPI_TOKENNAME : name used for return value of yylex ** YYAPI_TOKENTYPE : type of the token ** YYAPI_TOKENEME(t) : the value of the token that the parser should see ** YYAPI_TOKENNONE : the representation when there is no token ** YYAPI_VALUENAME : the name of the value of the token ** YYAPI_VALUETYPE : the type of the value of the token (if null, then the value is derivable from the token itself) ** YYAPI_VALUEOF(v) : how to get the value of the token. */ #define YYAPI_TOKENNAME yychar #define YYAPI_TOKENTYPE int #define YYAPI_TOKENEME(t) (t) #define YYAPI_TOKENNONE -1 #define YYAPI_TOKENSTR(t) (sprintf_s(yytokbuf, ARRAY_SIZE(yytokbuf), "%d", t), yytokbuf) #define YYAPI_VALUENAME yylval #define YYAPI_VALUETYPE YYSTYPE #define YYAPI_VALUEOF(v) (v) #endif #if ! 
defined(YYAPI_CALLAFTERYYLEX) #define YYAPI_CALLAFTERYYLEX #endif # define YYFLAG -1000 # define YYERROR goto yyerrlab # define YYACCEPT return(0) # define YYABORT return(1) #ifdef YYDEBUG /* RRR - 10/9/85 */ char yytokbuf[20]; # ifndef YYDBFLG # define YYDBFLG (yydebug) # endif # define yyprintf(a, b, c, d) if (YYDBFLG) YYPRINT(a, b, c, d) #else # define yyprintf(a, b, c, d) #endif #ifndef YYPRINT #define YYPRINT printf #endif /* parser for yacc output */ #ifdef YYDUMP int yydump = 1; /* 1 for dumping */ void yydumpinfo(void); #endif #ifdef YYDEBUG YYSTATIC int yydebug = 0; /* 1 for debugging */ #endif YYSTATIC YYSTYPE yyv[YYMAXDEPTH]; /* where the values are stored */ YYSTATIC short yys[YYMAXDEPTH]; /* the parse stack */ #if ! defined(YYRECURSIVE) YYSTATIC YYAPI_TOKENTYPE YYAPI_TOKENNAME = YYAPI_TOKENNONE; #if defined(YYAPI_VALUETYPE) // YYSTATIC YYAPI_VALUETYPE YYAPI_VALUENAME; FIX #endif YYSTATIC int yynerrs = 0; /* number of errors */ YYSTATIC short yyerrflag = 0; /* error recovery flag */ #endif #ifdef YYRECOVER /* ** yyscpy : copy f onto t and return a ptr to the null terminator at the ** end of t. */ YYSTATIC char *yyscpy(register char*t, register char*f) { while(*t = *f++) t++; return(t); /* ptr to the null char */ } #endif #ifndef YYNEAR #define YYNEAR #endif #ifndef YYPASCAL #define YYPASCAL #endif #ifndef YYLOCAL #define YYLOCAL #endif #if ! defined YYPARSER #define YYPARSER yyparse #endif #if ! defined YYLEX #define YYLEX yylex #endif #if defined(YYRECURSIVE) YYSTATIC YYAPI_TOKENTYPE YYAPI_TOKENNAME = YYAPI_TOKENNONE; #if defined(YYAPI_VALUETYPE) YYSTATIC YYAPI_VALUETYPE YYAPI_VALUENAME; #endif YYSTATIC int yynerrs = 0; /* number of errors */ YYSTATIC short yyerrflag = 0; /* error recovery flag */ YYSTATIC short yyn; YYSTATIC short yystate = 0; YYSTATIC short *yyps= &yys[-1]; YYSTATIC YYSTYPE *yypv= &yyv[-1]; YYSTATIC short yyj; YYSTATIC short yym; #endif #pragma warning(disable:102) YYLOCAL YYNEAR YYPASCAL YYPARSER() { #if ! defined(YYRECURSIVE) register short yyn; short yystate, *yyps; YYSTYPE *yypv; short yyj, yym; YYAPI_TOKENNAME = YYAPI_TOKENNONE; yystate = 0; #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:6200) // Index '-1' is out of valid index range...for non-stack buffer... 
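/*
** Sketch (editorial addition, not generated by yacc): one step of the
** table-driven loop below, assuming the standard yacc table conventions
** used in this file:
**
**   n = YYPACT[state];                      -- packed-table base for this state
**   if (n <= YYFLAG) act = YYDEF[state];    -- "simple" state, no lookahead
**   else if (0 <= n + token && n + token < YYLAST
**            && YYCHK[YYACT[n + token]] == token)
**       shift to state YYACT[n + token];    -- valid shift on the lookahead
**   else act = YYDEF[state];                -- default action for this state
**
** A YYDEF entry of -2 sends the driver to the yyexca exception list, which
** pairs lookahead tokens with reduce actions for that state; a reduce pops
** YYR2[rule] stack entries and takes the goto via YYR1/YYPGO.
*/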
#endif yyps= &yys[-1]; yypv= &yyv[-1]; #ifdef _PREFAST_ #pragma warning(pop) #endif #endif #ifdef YYDUMP yydumpinfo(); #endif yystack: /* put a state and value onto the stack */ #ifdef YYDEBUG if(YYAPI_TOKENNAME == YYAPI_TOKENNONE) { yyprintf( "state %d, token # '%d'\n", yystate, -1, 0 ); } else { yyprintf( "state %d, token # '%s'\n", yystate, YYAPI_TOKENSTR(YYAPI_TOKENNAME), 0 ); } #endif if( ++yyps > &yys[YYMAXDEPTH] ) { yyerror( "yacc stack overflow" ); return(1); } *yyps = yystate; ++yypv; *yypv = yyval; yynewstate: yyn = YYPACT[yystate]; if( yyn <= YYFLAG ) { /* simple state, no lookahead */ goto yydefault; } if( YYAPI_TOKENNAME == YYAPI_TOKENNONE ) { /* need a lookahead */ YYAPI_TOKENNAME = YYLEX(); YYAPI_CALLAFTERYYLEX(YYAPI_TOKENNAME); } if( ((yyn += YYAPI_TOKENEME(YYAPI_TOKENNAME)) < 0) || (yyn >= YYLAST) ) { goto yydefault; } if( YYCHK[ yyn = YYACT[ yyn ] ] == YYAPI_TOKENEME(YYAPI_TOKENNAME) ) { /* valid shift */ yyval = YYAPI_VALUEOF(YYAPI_VALUENAME); yystate = yyn; yyprintf( "SHIFT: saw token '%s', now in state %4d\n", YYAPI_TOKENSTR(YYAPI_TOKENNAME), yystate, 0 ); YYAPI_TOKENNAME = YYAPI_TOKENNONE; if( yyerrflag > 0 ) { --yyerrflag; } goto yystack; } yydefault: /* default state action */ if( (yyn = YYDEF[yystate]) == -2 ) { register YYCONST short *yyxi; if( YYAPI_TOKENNAME == YYAPI_TOKENNONE ) { YYAPI_TOKENNAME = YYLEX(); YYAPI_CALLAFTERYYLEX(YYAPI_TOKENNAME); yyprintf("LOOKAHEAD: token '%s'\n", YYAPI_TOKENSTR(YYAPI_TOKENNAME), 0, 0); } /* ** search exception table, we find a -1 followed by the current state. ** if we find one, we'll look through terminal,state pairs. if we find ** a terminal which matches the current one, we have a match. ** the exception table is when we have a reduce on a terminal. */ #if YYOPTTIME yyxi = yyexca + yyexcaind[yystate]; while(( *yyxi != YYAPI_TOKENEME(YYAPI_TOKENNAME) ) && ( *yyxi >= 0 )){ yyxi += 2; } #else for(yyxi = yyexca; (*yyxi != (-1)) || (yyxi[1] != yystate); yyxi += 2 ) { ; /* VOID */ } while( *(yyxi += 2) >= 0 ){ if( *yyxi == YYAPI_TOKENEME(YYAPI_TOKENNAME) ) { break; } } #endif if( (yyn = yyxi[1]) < 0 ) { return(0); /* accept */ } } if( yyn == 0 ){ /* error */ /* error ... attempt to resume parsing */ switch( yyerrflag ){ case 0: /* brand new error */ #ifdef YYRECOVER { register int i,j; for(i = 0; (yyrecover[i] != -1000) && (yystate > yyrecover[i]); i += 3 ) { ; } if(yystate == yyrecover[i]) { yyprintf("recovered, from state %d to state %d on token # %d\n", yystate,yyrecover[i+2],yyrecover[i+1] ); j = yyrecover[i + 1]; if(j < 0) { /* ** here we have one of the injection set, so we're not quite ** sure that the next valid thing will be a shift. so we'll ** count it as an error and continue. ** actually we're not absolutely sure that the next token ** we were supposed to get is the one when j > 0. for example, ** for(+) {;} error recovery with yyerrflag always set, stops ** after inserting one ; before the +. at the point of the +, ** we're pretty sure the caller wants a 'for' loop. without ** setting the flag, when we're almost absolutely sure, we'll ** give them one, since the only thing we can shift on this ** error is after finding an expression followed by a + */ yyerrflag++; j = -j; } if(yyerrflag <= 1) { /* only on first insertion */ yyrecerr(YYAPI_TOKENNAME, j); /* what was, what should be first */ } yyval = yyeval(j); yystate = yyrecover[i + 2]; goto yystack; } } #endif yyerror("syntax error"); yyerrlab: ++yynerrs; FALLTHROUGH; case 1: case 2: /* incompletely recovered error ... 
try again */ yyerrflag = 3; /* find a state where "error" is a legal shift action */ while ( yyps >= yys ) { yyn = YYPACT[*yyps] + YYERRCODE; if( yyn >= 0 && yyn < YYLAST && YYCHK[YYACT[yyn]] == YYERRCODE ){ yystate = YYACT[yyn]; /* simulate a shift of "error" */ yyprintf( "SHIFT 'error': now in state %4d\n", yystate, 0, 0 ); goto yystack; } yyn = YYPACT[*yyps]; /* the current yyps has no shift on "error", pop stack */ yyprintf( "error recovery pops state %4d, uncovers %4d\n", *yyps, yyps[-1], 0 ); --yyps; --yypv; } /* there is no state on the stack with an error shift ... abort */ yyabort: return(1); case 3: /* no shift yet; clobber input char */ yyprintf( "error recovery discards token '%s'\n", YYAPI_TOKENSTR(YYAPI_TOKENNAME), 0, 0 ); if( YYAPI_TOKENEME(YYAPI_TOKENNAME) == 0 ) goto yyabort; /* don't discard EOF, quit */ YYAPI_TOKENNAME = YYAPI_TOKENNONE; goto yynewstate; /* try again in the same state */ } } /* reduction by production yyn */ yyreduce: { register YYSTYPE *yypvt; yypvt = yypv; yyps -= YYR2[yyn]; yypv -= YYR2[yyn]; yyval = yypv[1]; yyprintf("REDUCE: rule %4d, popped %2d tokens, uncovered state %4d, ",yyn, YYR2[yyn], *yyps); yym = yyn; yyn = YYR1[yyn]; /* consult goto table to find next state */ yyj = YYPGO[yyn] + *yyps + 1; if( (yyj >= YYLAST) || (YYCHK[ yystate = YYACT[yyj] ] != -yyn) ) { yystate = YYACT[YYPGO[yyn]]; } yyprintf("goto state %4d\n", yystate, 0, 0); #ifdef YYDUMP yydumpinfo(); #endif switch(yym){ case 3: #line 194 "asmparse.y" { PASM->EndClass(); } break; case 4: #line 195 "asmparse.y" { PASM->EndNameSpace(); } break; case 5: #line 196 "asmparse.y" { if(PASM->m_pCurMethod->m_ulLines[1] ==0) { PASM->m_pCurMethod->m_ulLines[1] = PASM->m_ulCurLine; PASM->m_pCurMethod->m_ulColumns[1]=PASM->m_ulCurColumn;} PASM->EndMethod(); } break; case 12: #line 206 "asmparse.y" { PASMM->EndAssembly(); } break; case 13: #line 207 "asmparse.y" { PASMM->EndAssembly(); } break; case 14: #line 208 "asmparse.y" { PASMM->EndComType(); } break; case 15: #line 209 "asmparse.y" { PASMM->EndManifestRes(); } break; case 19: #line 213 "asmparse.y" { #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:22011) // Suppress PREFast warning about integer overflow/underflow #endif PASM->m_dwSubsystem = yypvt[-0].int32; #ifdef _PREFAST_ #pragma warning(pop) #endif } break; case 20: #line 223 "asmparse.y" { PASM->m_dwComImageFlags = yypvt[-0].int32; } break; case 21: #line 224 "asmparse.y" { PASM->m_dwFileAlignment = yypvt[-0].int32; if((yypvt[-0].int32 & (yypvt[-0].int32 - 1))||(yypvt[-0].int32 < 0x200)||(yypvt[-0].int32 > 0x10000)) PASM->report->error("Invalid file alignment, must be power of 2 from 0x200 to 0x10000\n");} break; case 22: #line 227 "asmparse.y" { PASM->m_stBaseAddress = (ULONGLONG)(*(yypvt[-0].int64)); delete yypvt[-0].int64; if(PASM->m_stBaseAddress & 0xFFFF) PASM->report->error("Invalid image base, must be 0x10000-aligned\n");} break; case 23: #line 230 "asmparse.y" { PASM->m_stSizeOfStackReserve = (size_t)(*(yypvt[-0].int64)); delete yypvt[-0].int64; } break; case 28: #line 235 "asmparse.y" { PASM->m_fIsMscorlib = TRUE; } break; case 31: #line 242 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 32: #line 243 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 33: #line 246 "asmparse.y" { LPCSTRToGuid(yypvt[-0].string,&(PASM->m_guidLang)); } break; case 34: #line 247 "asmparse.y" { LPCSTRToGuid(yypvt[-2].string,&(PASM->m_guidLang));
LPCSTRToGuid(yypvt[-0].string,&(PASM->m_guidLangVendor));} break; case 35: #line 249 "asmparse.y" { LPCSTRToGuid(yypvt[-4].string,&(PASM->m_guidLang)); LPCSTRToGuid(yypvt[-2].string,&(PASM->m_guidLangVendor)); LPCSTRToGuid(yypvt[-0].string,&(PASM->m_guidDoc));} break; case 36: #line 254 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 37: #line 255 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 38: #line 258 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 39: #line 259 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 40: #line 260 "asmparse.y" { yyval.string = newStringWDel(yypvt[-2].string, '.', yypvt[-0].string); } break; case 41: #line 263 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 42: #line 266 "asmparse.y" { yyval.int64 = yypvt[-0].int64; } break; case 43: #line 267 "asmparse.y" { yyval.int64 = neg ? new __int64(yypvt[-0].int32) : new __int64((unsigned)yypvt[-0].int32); } break; case 44: #line 270 "asmparse.y" { yyval.float64 = yypvt[-0].float64; } break; case 45: #line 271 "asmparse.y" { float f; *((__int32*) (&f)) = yypvt[-1].int32; yyval.float64 = new double(f); } break; case 46: #line 272 "asmparse.y" { yyval.float64 = (double*) yypvt[-1].int64; } break; case 47: #line 276 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].binstr,yypvt[-0].string); } break; case 48: #line 277 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].token,yypvt[-0].string); } break; case 49: #line 278 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].token,yypvt[-0].string); } break; case 50: #line 279 "asmparse.y" { yypvt[-2].cad->tkOwner = 0; PASM->AddTypeDef(yypvt[-2].cad,yypvt[-0].string); } break; case 51: #line 280 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].cad,yypvt[-0].string); } break; case 52: #line 285 "asmparse.y" { DefineVar(yypvt[-0].string, NULL); } break; case 53: #line 286 "asmparse.y" { DefineVar(yypvt[-1].string, yypvt[-0].binstr); } break; case 54: #line 287 "asmparse.y" { UndefVar(yypvt[-0].string); } break; case 55: #line 288 "asmparse.y" { SkipToken = !IsVarDefined(yypvt[-0].string); IfEndif++; } break; case 56: #line 291 "asmparse.y" { SkipToken = IsVarDefined(yypvt[-0].string); IfEndif++; } break; case 57: #line 294 "asmparse.y" { if(IfEndif == 1) SkipToken = !SkipToken;} break; case 58: #line 295 "asmparse.y" { if(IfEndif == 0) PASM->report->error("Unmatched #endif\n"); else IfEndif--; } break; case 59: #line 299 "asmparse.y" { _ASSERTE(!"yylex should have dealt with this"); } break; case 60: #line 300 "asmparse.y" { } break; case 61: #line 304 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-0].token, NULL); } break; case 62: #line 305 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-2].token, yypvt[-0].binstr); } break; case 63: #line 306 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-4].token, yypvt[-1].binstr); } break; case 64: #line 307 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-2].int32, yypvt[-1].binstr); } break; case 65: #line 310 "asmparse.y" { yyval.cad = new CustomDescr(yypvt[-2].token, yypvt[-0].token, NULL); } break; case 66: #line 311 "asmparse.y" { yyval.cad = new CustomDescr(yypvt[-4].token, yypvt[-2].token, yypvt[-0].binstr); } break; case 67: #line 313 "asmparse.y" { yyval.cad = new CustomDescr(yypvt[-6].token, yypvt[-4].token, yypvt[-1].binstr); } break; case 68: #line 314 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-2].int32, yypvt[-1].binstr); } break; case 69: #line
317 "asmparse.y" { yyval.int32 = yypvt[-2].token; bParsingByteArray = TRUE; } break; case 70: #line 321 "asmparse.y" { PASM->m_pCustomDescrList = NULL; PASM->m_tkCurrentCVOwner = yypvt[-4].token; yyval.int32 = yypvt[-2].token; bParsingByteArray = TRUE; } break; case 71: #line 326 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 72: #line 329 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 73: #line 330 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 74: #line 334 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt16(VAL16(nCustomBlobNVPairs)); yyval.binstr->append(yypvt[-0].binstr); nCustomBlobNVPairs = 0; } break; case 75: #line 340 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt16(VAL16(0x0001)); } break; case 76: #line 341 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; AppendFieldToCustomBlob(yyval.binstr,yypvt[-0].binstr); } break; case 77: #line 343 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 78: #line 346 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 79: #line 348 "asmparse.y" { yyval.binstr = yypvt[-5].binstr; yyval.binstr->appendInt8(yypvt[-4].int32); yyval.binstr->append(yypvt[-3].binstr); AppendStringWithLength(yyval.binstr,yypvt[-2].string); AppendFieldToCustomBlob(yyval.binstr,yypvt[-0].binstr); nCustomBlobNVPairs++; } break; case 80: #line 353 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 81: #line 356 "asmparse.y" { yyval.int32 = SERIALIZATION_TYPE_FIELD; } break; case 82: #line 357 "asmparse.y" { yyval.int32 = SERIALIZATION_TYPE_PROPERTY; } break; case 83: #line 360 "asmparse.y" { if(yypvt[-0].cad->tkOwner && !yypvt[-0].cad->tkInterfacePair) PASM->DefineCV(yypvt[-0].cad); else if(PASM->m_pCustomDescrList) PASM->m_pCustomDescrList->PUSH(yypvt[-0].cad); } break; case 84: #line 364 "asmparse.y" { PASM->DefineCV(yypvt[-0].cad); } break; case 85: #line 365 "asmparse.y" { CustomDescr* pNew = new CustomDescr(yypvt[-0].tdd->m_pCA); if(pNew->tkOwner == 0) pNew->tkOwner = PASM->m_tkCurrentCVOwner; if(pNew->tkOwner) PASM->DefineCV(pNew); else if(PASM->m_pCustomDescrList) PASM->m_pCustomDescrList->PUSH(pNew); } break; case 86: #line 373 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 87: #line 374 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); } break; case 88: #line 375 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TAGGED_OBJECT); } break; case 89: #line 376 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); AppendStringWithLength(yyval.binstr,yypvt[-0].string); } break; case 90: #line 378 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); AppendStringWithLength(yyval.binstr,PASM->ReflectionNotation(yypvt[-0].token)); } break; case 91: #line 380 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 92: #line 385 "asmparse.y" { PASMM->SetModuleName(NULL); PASM->m_tkCurrentCVOwner=1; } break; case 93: #line 386 "asmparse.y" { PASMM->SetModuleName(yypvt[-0].string); PASM->m_tkCurrentCVOwner=1; } break; case 94: #line 387 "asmparse.y" { BinStr* pbs = new BinStr(); unsigned L = (unsigned)strlen(yypvt[-0].string); memcpy((char*)(pbs->getBuff(L)),yypvt[-0].string,L); PASM->EmitImport(pbs); delete pbs;} break; case 95: #line 394 "asmparse.y" { /*PASM->SetDataSection(); PASM->EmitDataLabel($7);*/ PASM->m_VTFList.PUSH(new 
VTFEntry((USHORT)yypvt[-4].int32, (USHORT)yypvt[-2].int32, yypvt[-0].string)); } break; case 96: #line 398 "asmparse.y" { yyval.int32 = 0; } break; case 97: #line 399 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_32BIT; } break; case 98: #line 400 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_64BIT; } break; case 99: #line 401 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_FROM_UNMANAGED; } break; case 100: #line 402 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_CALL_MOST_DERIVED; } break; case 101: #line 403 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN; } break; case 102: #line 406 "asmparse.y" { PASM->m_pVTable = yypvt[-1].binstr; } break; case 103: #line 409 "asmparse.y" { bParsingByteArray = TRUE; } break; case 104: #line 413 "asmparse.y" { PASM->StartNameSpace(yypvt[-0].string); } break; case 105: #line 416 "asmparse.y" { newclass = TRUE; } break; case 106: #line 419 "asmparse.y" { if(yypvt[-0].typarlist) FixupConstraints(); PASM->StartClass(yypvt[-1].string, yypvt[-2].classAttr, yypvt[-0].typarlist); TyParFixupList.RESET(false); newclass = FALSE; } break; case 107: #line 425 "asmparse.y" { PASM->AddClass(); } break; case 108: #line 428 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) 0; } break; case 109: #line 429 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdVisibilityMask) | tdPublic); } break; case 110: #line 430 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdVisibilityMask) | tdNotPublic); } break; case 111: #line 431 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | 0x80000000 | tdSealed); } break; case 112: #line 432 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | 0x40000000); } break; case 113: #line 433 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdInterface | tdAbstract); } break; case 114: #line 434 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdSealed); } break; case 115: #line 435 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdAbstract); } break; case 116: #line 436 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdLayoutMask) | tdAutoLayout); } break; case 117: #line 437 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdLayoutMask) | tdSequentialLayout); } break; case 118: #line 438 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdLayoutMask) | tdExplicitLayout); } break; case 119: #line 439 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdStringFormatMask) | tdAnsiClass); } break; case 120: #line 440 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdStringFormatMask) | tdUnicodeClass); } break; case 121: #line 441 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdStringFormatMask) | tdAutoClass); } break; case 122: #line 442 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdImport); } break; case 123: #line 443 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdSerializable); } break; case 124: #line 444 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdWindowsRuntime); } break; case 125: #line 445 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedPublic); } break; case 126: #line 446 "asmparse.y" { 
yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedPrivate); } break; case 127: #line 447 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedFamily); } break; case 128: #line 448 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedAssembly); } break; case 129: #line 449 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedFamANDAssem); } break; case 130: #line 450 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedFamORAssem); } break; case 131: #line 451 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdBeforeFieldInit); } break; case 132: #line 452 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdSpecialName); } break; case 133: #line 453 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr); } break; case 134: #line 454 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].int32); } break; case 136: #line 458 "asmparse.y" { PASM->m_crExtends = yypvt[-0].token; } break; case 141: #line 469 "asmparse.y" { PASM->AddToImplList(yypvt[-0].token); } break; case 142: #line 470 "asmparse.y" { PASM->AddToImplList(yypvt[-0].token); } break; case 143: #line 474 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 144: #line 475 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 145: #line 478 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(yypvt[-0].token); } break; case 146: #line 479 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->appendInt32(yypvt[-0].token); } break; case 147: #line 482 "asmparse.y" { yyval.typarlist = NULL; PASM->m_TyParList = NULL;} break; case 148: #line 483 "asmparse.y" { yyval.typarlist = yypvt[-1].typarlist; PASM->m_TyParList = yypvt[-1].typarlist;} break; case 149: #line 486 "asmparse.y" { yyval.int32 = gpCovariant; } break; case 150: #line 487 "asmparse.y" { yyval.int32 = gpContravariant; } break; case 151: #line 488 "asmparse.y" { yyval.int32 = gpReferenceTypeConstraint; } break; case 152: #line 489 "asmparse.y" { yyval.int32 = gpNotNullableValueTypeConstraint; } break; case 153: #line 490 "asmparse.y" { yyval.int32 = gpDefaultConstructorConstraint; } break; case 154: #line 493 "asmparse.y" { yyval.int32 = 0; } break; case 155: #line 494 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | yypvt[-0].int32; } break; case 156: #line 497 "asmparse.y" {yyval.typarlist = new TyParList(yypvt[-3].int32, yypvt[-2].binstr, yypvt[-1].string, yypvt[-0].typarlist);} break; case 157: #line 498 "asmparse.y" {yyval.typarlist = new TyParList(yypvt[-2].int32, NULL, yypvt[-1].string, yypvt[-0].typarlist);} break; case 158: #line 501 "asmparse.y" { yyval.typarlist = NULL; } break; case 159: #line 502 "asmparse.y" { yyval.typarlist = yypvt[-0].typarlist; } break; case 160: #line 505 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 161: #line 508 "asmparse.y" { yyval.int32= 0; } break; case 162: #line 509 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 163: #line 512 "asmparse.y" { yyval.int32 = yypvt[-2].int32; } break; case 164: #line 516 "asmparse.y" { if(PASM->m_pCurMethod->m_ulLines[1] ==0) { PASM->m_pCurMethod->m_ulLines[1] = PASM->m_ulCurLine; PASM->m_pCurMethod->m_ulColumns[1]=PASM->m_ulCurColumn;} PASM->EndMethod(); } break; case 165: #line 520 "asmparse.y" { PASM->EndClass(); } break; case 166: 
#line 521 "asmparse.y" { PASM->EndEvent(); } break; case 167: #line 522 "asmparse.y" { PASM->EndProp(); } break; case 173: #line 528 "asmparse.y" { PASM->m_pCurClass->m_ulSize = yypvt[-0].int32; } break; case 174: #line 529 "asmparse.y" { PASM->m_pCurClass->m_ulPack = yypvt[-0].int32; } break; case 175: #line 530 "asmparse.y" { PASMM->EndComType(); } break; case 176: #line 532 "asmparse.y" { BinStr *sig1 = parser->MakeSig(yypvt[-7].int32, yypvt[-6].binstr, yypvt[-1].binstr); BinStr *sig2 = new BinStr(); sig2->append(sig1); PASM->AddMethodImpl(yypvt[-11].token,yypvt[-9].string,sig1,yypvt[-5].token,yypvt[-3].string,sig2); PASM->ResetArgNameList(); } break; case 177: #line 538 "asmparse.y" { PASM->AddMethodImpl(yypvt[-17].token,yypvt[-15].string, (yypvt[-14].int32==0 ? parser->MakeSig(yypvt[-19].int32,yypvt[-18].binstr,yypvt[-12].binstr) : parser->MakeSig(yypvt[-19].int32| IMAGE_CEE_CS_CALLCONV_GENERIC,yypvt[-18].binstr,yypvt[-12].binstr,yypvt[-14].int32)), yypvt[-6].token,yypvt[-4].string, (yypvt[-3].int32==0 ? parser->MakeSig(yypvt[-8].int32,yypvt[-7].binstr,yypvt[-1].binstr) : parser->MakeSig(yypvt[-8].int32| IMAGE_CEE_CS_CALLCONV_GENERIC,yypvt[-7].binstr,yypvt[-1].binstr,yypvt[-3].int32))); PASM->ResetArgNameList(); } break; case 180: #line 548 "asmparse.y" { if((yypvt[-1].int32 > 0) && (yypvt[-1].int32 <= (int)PASM->m_pCurClass->m_NumTyPars)) PASM->m_pCustomDescrList = PASM->m_pCurClass->m_TyPars[yypvt[-1].int32-1].CAList(); else PASM->report->error("Type parameter index out of range\n"); } break; case 181: #line 553 "asmparse.y" { int n = PASM->m_pCurClass->FindTyPar(yypvt[-0].string); if(n >= 0) PASM->m_pCustomDescrList = PASM->m_pCurClass->m_TyPars[n].CAList(); else PASM->report->error("Type parameter '%s' undefined\n",yypvt[-0].string); } break; case 182: #line 559 "asmparse.y" { PASM->AddGenericParamConstraint(yypvt[-3].int32, 0, yypvt[-0].token); } break; case 183: #line 560 "asmparse.y" { PASM->AddGenericParamConstraint(0, yypvt[-2].string, yypvt[-0].token); } break; case 184: #line 561 "asmparse.y" { yypvt[-0].cad->tkInterfacePair = yypvt[-1].token; if(PASM->m_pCustomDescrList) PASM->m_pCustomDescrList->PUSH(yypvt[-0].cad); } break; case 185: #line 569 "asmparse.y" { yypvt[-3].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); PASM->AddField(yypvt[-2].string, yypvt[-3].binstr, yypvt[-4].fieldAttr, yypvt[-1].string, yypvt[-0].binstr, yypvt[-5].int32); } break; case 186: #line 573 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) 0; } break; case 187: #line 574 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdStatic); } break; case 188: #line 575 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdPublic); } break; case 189: #line 576 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdPrivate); } break; case 190: #line 577 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdFamily); } break; case 191: #line 578 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdInitOnly); } break; case 192: #line 579 "asmparse.y" { yyval.fieldAttr = yypvt[-1].fieldAttr; } break; case 193: #line 580 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdSpecialName); } break; case 194: #line 593 "asmparse.y" { PASM->m_pMarshal = yypvt[-1].binstr; } break; case 195: #line 594 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdAssembly); } break; case 196: #line 
595 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdFamANDAssem); } break; case 197: #line 596 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdFamORAssem); } break; case 198: #line 597 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdPrivateScope); } break; case 199: #line 598 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdLiteral); } break; case 200: #line 599 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdNotSerialized); } break; case 201: #line 600 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].int32); } break; case 202: #line 603 "asmparse.y" { yyval.string = 0; } break; case 203: #line 604 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 204: #line 607 "asmparse.y" { yyval.binstr = NULL; } break; case 205: #line 608 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 206: #line 611 "asmparse.y" { yyval.int32 = 0xFFFFFFFF; } break; case 207: #line 612 "asmparse.y" { yyval.int32 = yypvt[-1].int32; } break; case 208: #line 617 "asmparse.y" { PASM->ResetArgNameList(); if (yypvt[-3].binstr == NULL) { if((iCallConv)&&((yypvt[-8].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(yypvt[-6].token, yypvt[-4].string, parser->MakeSig(yypvt[-8].int32|iCallConv, yypvt[-7].binstr, yypvt[-1].binstr)); } else { mdToken mr; if((iCallConv)&&((yypvt[-8].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); mr = PASM->MakeMemberRef(yypvt[-6].token, yypvt[-4].string, parser->MakeSig(yypvt[-8].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-7].binstr, yypvt[-1].binstr, corCountArgs(yypvt[-3].binstr))); yyval.token = PASM->MakeMethodSpec(mr, parser->MakeSig(IMAGE_CEE_CS_CALLCONV_INSTANTIATION, 0, yypvt[-3].binstr)); } } break; case 209: #line 634 "asmparse.y" { PASM->ResetArgNameList(); if((iCallConv)&&((yypvt[-8].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(yypvt[-6].token, yypvt[-4].string, parser->MakeSig(yypvt[-8].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-7].binstr, yypvt[-1].binstr, yypvt[-3].int32)); } break; case 210: #line 640 "asmparse.y" { PASM->ResetArgNameList(); if (yypvt[-3].binstr == NULL) { if((iCallConv)&&((yypvt[-6].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(mdTokenNil, yypvt[-4].string, parser->MakeSig(yypvt[-6].int32|iCallConv, yypvt[-5].binstr, yypvt[-1].binstr)); } else { mdToken mr; if((iCallConv)&&((yypvt[-6].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); mr = PASM->MakeMemberRef(mdTokenNil, yypvt[-4].string, parser->MakeSig(yypvt[-6].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-5].binstr, yypvt[-1].binstr, corCountArgs(yypvt[-3].binstr))); yyval.token = PASM->MakeMethodSpec(mr, parser->MakeSig(IMAGE_CEE_CS_CALLCONV_INSTANTIATION, 0, yypvt[-3].binstr)); } } break; case 211: #line 656 "asmparse.y" { PASM->ResetArgNameList(); if((iCallConv)&&((yypvt[-6].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(mdTokenNil, yypvt[-4].string, parser->MakeSig(yypvt[-6].int32 | 
IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-5].binstr, yypvt[-1].binstr, yypvt[-3].int32)); } break; case 212: #line 660 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 213: #line 661 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; } break; case 214: #line 662 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; } break; case 215: #line 665 "asmparse.y" { yyval.int32 = (yypvt[-0].int32 | IMAGE_CEE_CS_CALLCONV_HASTHIS); } break; case 216: #line 666 "asmparse.y" { yyval.int32 = (yypvt[-0].int32 | IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS); } break; case 217: #line 667 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 218: #line 668 "asmparse.y" { yyval.int32 = yypvt[-1].int32; } break; case 219: #line 671 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_DEFAULT; } break; case 220: #line 672 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_DEFAULT; } break; case 221: #line 673 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_VARARG; } break; case 222: #line 674 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_C; } break; case 223: #line 675 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_STDCALL; } break; case 224: #line 676 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_THISCALL; } break; case 225: #line 677 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_FASTCALL; } break; case 226: #line 678 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_UNMANAGED; } break; case 227: #line 681 "asmparse.y" { yyval.token = yypvt[-1].int32; } break; case 228: #line 684 "asmparse.y" { yyval.token = yypvt[-0].token; PASM->delArgNameList(PASM->m_firstArgName); PASM->m_firstArgName = parser->m_ANSFirst.POP(); PASM->m_lastArgName = parser->m_ANSLast.POP(); PASM->SetMemberRefFixup(yypvt[-0].token,iOpcodeLen); } break; case 229: #line 690 "asmparse.y" { yypvt[-3].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); yyval.token = PASM->MakeMemberRef(yypvt[-2].token, yypvt[-0].string, yypvt[-3].binstr); PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 230: #line 694 "asmparse.y" { yypvt[-1].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); yyval.token = PASM->MakeMemberRef(NULL, yypvt[-0].string, yypvt[-1].binstr); PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 231: #line 697 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 232: #line 699 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 233: #line 701 "asmparse.y" { yyval.token = yypvt[-0].token; PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 234: #line 706 "asmparse.y" { PASM->ResetEvent(yypvt[-0].string, yypvt[-1].token, yypvt[-2].eventAttr); } break; case 235: #line 707 "asmparse.y" { PASM->ResetEvent(yypvt[-0].string, mdTypeRefNil, yypvt[-1].eventAttr); } break; case 236: #line 711 "asmparse.y" { yyval.eventAttr = (CorEventAttr) 0; } break; case 237: #line 712 "asmparse.y" { yyval.eventAttr = yypvt[-1].eventAttr; } break; case 238: #line 713 "asmparse.y" { yyval.eventAttr = (CorEventAttr) (yypvt[-1].eventAttr | evSpecialName); } break; case 241: #line 720 "asmparse.y" { PASM->SetEventMethod(0, yypvt[-0].token); } break; case 242: #line 721 "asmparse.y" { PASM->SetEventMethod(1, yypvt[-0].token); } break; case 243: #line 722 "asmparse.y" { PASM->SetEventMethod(2, yypvt[-0].token); } break; case 244: #line 723 "asmparse.y" { PASM->SetEventMethod(3, yypvt[-0].token); } break; case 249: #line 732 "asmparse.y" { 
PASM->ResetProp(yypvt[-4].string, parser->MakeSig((IMAGE_CEE_CS_CALLCONV_PROPERTY | (yypvt[-6].int32 & IMAGE_CEE_CS_CALLCONV_HASTHIS)),yypvt[-5].binstr,yypvt[-2].binstr), yypvt[-7].propAttr, yypvt[-0].binstr);} break; case 250: #line 737 "asmparse.y" { yyval.propAttr = (CorPropertyAttr) 0; } break; case 251: #line 738 "asmparse.y" { yyval.propAttr = yypvt[-1].propAttr; } break; case 252: #line 739 "asmparse.y" { yyval.propAttr = (CorPropertyAttr) (yypvt[-1].propAttr | prSpecialName); } break; case 255: #line 747 "asmparse.y" { PASM->SetPropMethod(0, yypvt[-0].token); } break; case 256: #line 748 "asmparse.y" { PASM->SetPropMethod(1, yypvt[-0].token); } break; case 257: #line 749 "asmparse.y" { PASM->SetPropMethod(2, yypvt[-0].token); } break; case 262: #line 757 "asmparse.y" { PASM->ResetForNextMethod(); uMethodBeginLine = PASM->m_ulCurLine; uMethodBeginColumn=PASM->m_ulCurColumn; } break; case 263: #line 763 "asmparse.y" { yyval.binstr = NULL; } break; case 264: #line 764 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 265: #line 767 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 266: #line 768 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 267: #line 771 "asmparse.y" { bParsingByteArray = TRUE; } break; case 268: #line 775 "asmparse.y" { BinStr* sig; if (yypvt[-5].typarlist == NULL) sig = parser->MakeSig(yypvt[-10].int32, yypvt[-8].binstr, yypvt[-3].binstr); else { FixupTyPars(yypvt[-8].binstr); sig = parser->MakeSig(yypvt[-10].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC, yypvt[-8].binstr, yypvt[-3].binstr, yypvt[-5].typarlist->Count()); FixupConstraints(); } PASM->StartMethod(yypvt[-6].string, sig, yypvt[-11].methAttr, yypvt[-7].binstr, yypvt[-9].int32, yypvt[-5].typarlist); TyParFixupList.RESET(false); PASM->SetImplAttr((USHORT)yypvt[-1].implAttr); PASM->m_pCurMethod->m_ulLines[0] = uMethodBeginLine; PASM->m_pCurMethod->m_ulColumns[0]=uMethodBeginColumn; } break; case 269: #line 790 "asmparse.y" { yyval.methAttr = (CorMethodAttr) 0; } break; case 270: #line 791 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdStatic); } break; case 271: #line 792 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdPublic); } break; case 272: #line 793 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdPrivate); } break; case 273: #line 794 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdFamily); } break; case 274: #line 795 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdFinal); } break; case 275: #line 796 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdSpecialName); } break; case 276: #line 797 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdVirtual); } break; case 277: #line 798 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdCheckAccessOnOverride); } break; case 278: #line 799 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdAbstract); } break; case 279: #line 800 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdAssem); } break; case 280: #line 801 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdFamANDAssem); } break; case 281: #line 802 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdFamORAssem); } break; case 282: #line 803 "asmparse.y" { 
yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdPrivateScope); } break; case 283: #line 804 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdHideBySig); } break; case 284: #line 805 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdNewSlot); } break; case 285: #line 806 "asmparse.y" { yyval.methAttr = yypvt[-1].methAttr; } break; case 286: #line 807 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdUnmanagedExport); } break; case 287: #line 808 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdRequireSecObject); } break; case 288: #line 809 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].int32); } break; case 289: #line 811 "asmparse.y" { PASM->SetPinvoke(yypvt[-4].binstr,0,yypvt[-2].binstr,yypvt[-1].pinvAttr); yyval.methAttr = (CorMethodAttr) (yypvt[-7].methAttr | mdPinvokeImpl); } break; case 290: #line 814 "asmparse.y" { PASM->SetPinvoke(yypvt[-2].binstr,0,NULL,yypvt[-1].pinvAttr); yyval.methAttr = (CorMethodAttr) (yypvt[-5].methAttr | mdPinvokeImpl); } break; case 291: #line 817 "asmparse.y" { PASM->SetPinvoke(new BinStr(),0,NULL,yypvt[-1].pinvAttr); yyval.methAttr = (CorMethodAttr) (yypvt[-4].methAttr | mdPinvokeImpl); } break; case 292: #line 821 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) 0; } break; case 293: #line 822 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmNoMangle); } break; case 294: #line 823 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCharSetAnsi); } break; case 295: #line 824 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCharSetUnicode); } break; case 296: #line 825 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCharSetAuto); } break; case 297: #line 826 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmSupportsLastError); } break; case 298: #line 827 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvWinapi); } break; case 299: #line 828 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvCdecl); } break; case 300: #line 829 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvStdcall); } break; case 301: #line 830 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvThiscall); } break; case 302: #line 831 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvFastcall); } break; case 303: #line 832 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmBestFitEnabled); } break; case 304: #line 833 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmBestFitDisabled); } break; case 305: #line 834 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmThrowOnUnmappableCharEnabled); } break; case 306: #line 835 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmThrowOnUnmappableCharDisabled); } break; case 307: #line 836 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].int32); } break; case 308: #line 839 "asmparse.y" { yyval.string = newString(COR_CTOR_METHOD_NAME); } break; case 309: #line 840 "asmparse.y" { yyval.string = newString(COR_CCTOR_METHOD_NAME); } break; case 310: #line 841 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 311: #line 844 "asmparse.y" { yyval.int32 = 0; } break; case 312: #line 845 "asmparse.y" { yyval.int32 = yypvt[-3].int32 | pdIn; } break; case 313: 
#line 846 "asmparse.y" { yyval.int32 = yypvt[-3].int32 | pdOut; } break; case 314: #line 847 "asmparse.y" { yyval.int32 = yypvt[-3].int32 | pdOptional; } break; case 315: #line 848 "asmparse.y" { yyval.int32 = yypvt[-1].int32 + 1; } break; case 316: #line 851 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (miIL | miManaged); } break; case 317: #line 852 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFF4) | miNative); } break; case 318: #line 853 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFF4) | miIL); } break; case 319: #line 854 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFF4) | miOPTIL); } break; case 320: #line 855 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFFB) | miManaged); } break; case 321: #line 856 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFFB) | miUnmanaged); } break; case 322: #line 857 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miForwardRef); } break; case 323: #line 858 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miPreserveSig); } break; case 324: #line 859 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miRuntime); } break; case 325: #line 860 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miInternalCall); } break; case 326: #line 861 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miSynchronized); } break; case 327: #line 862 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miNoInlining); } break; case 328: #line 863 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miAggressiveInlining); } break; case 329: #line 864 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miNoOptimization); } break; case 330: #line 865 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miAggressiveOptimization); } break; case 331: #line 866 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].int32); } break; case 332: #line 869 "asmparse.y" { PASM->delArgNameList(PASM->m_firstArgName); PASM->m_firstArgName = NULL;PASM->m_lastArgName = NULL; } break; case 335: #line 877 "asmparse.y" { PASM->EmitByte(yypvt[-0].int32); } break; case 336: #line 878 "asmparse.y" { delete PASM->m_SEHD; PASM->m_SEHD = PASM->m_SEHDstack.POP(); } break; case 337: #line 879 "asmparse.y" { PASM->EmitMaxStack(yypvt[-0].int32); } break; case 338: #line 880 "asmparse.y" { PASM->EmitLocals(parser->MakeSig(IMAGE_CEE_CS_CALLCONV_LOCAL_SIG, 0, yypvt[-1].binstr)); } break; case 339: #line 882 "asmparse.y" { PASM->EmitZeroInit(); PASM->EmitLocals(parser->MakeSig(IMAGE_CEE_CS_CALLCONV_LOCAL_SIG, 0, yypvt[-1].binstr)); } break; case 340: #line 885 "asmparse.y" { PASM->EmitEntryPoint(); } break; case 341: #line 886 "asmparse.y" { PASM->EmitZeroInit(); } break; case 344: #line 889 "asmparse.y" { PASM->AddLabel(PASM->m_CurPC,yypvt[-1].string); /*PASM->EmitLabel($1);*/ } break; case 350: #line 895 "asmparse.y" { if(PASM->m_pCurMethod->m_dwExportOrdinal == 0xFFFFFFFF) { PASM->m_pCurMethod->m_dwExportOrdinal = yypvt[-1].int32; PASM->m_pCurMethod->m_szExportAlias = NULL; if(PASM->m_pCurMethod->m_wVTEntry == 0) PASM->m_pCurMethod->m_wVTEntry = 1; if(PASM->m_pCurMethod->m_wVTSlot == 0) PASM->m_pCurMethod->m_wVTSlot = yypvt[-1].int32 + 0x8000; } else PASM->report->warn("Duplicate .export directive, ignored\n"); } break; case 351: #line 905 "asmparse.y" { 
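// .export cases (350-351): record the export ordinal (and optional alias) once, defaulting the
// VTable entry/slot when unset; a repeated .export directive only draws a warning.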
if(PASM->m_pCurMethod->m_dwExportOrdinal == 0xFFFFFFFF) { PASM->m_pCurMethod->m_dwExportOrdinal = yypvt[-3].int32; PASM->m_pCurMethod->m_szExportAlias = yypvt[-0].string; if(PASM->m_pCurMethod->m_wVTEntry == 0) PASM->m_pCurMethod->m_wVTEntry = 1; if(PASM->m_pCurMethod->m_wVTSlot == 0) PASM->m_pCurMethod->m_wVTSlot = yypvt[-3].int32 + 0x8000; } else PASM->report->warn("Duplicate .export directive, ignored\n"); } break; case 352: #line 915 "asmparse.y" { PASM->m_pCurMethod->m_wVTEntry = (WORD)yypvt[-2].int32; PASM->m_pCurMethod->m_wVTSlot = (WORD)yypvt[-0].int32; } break; case 353: #line 918 "asmparse.y" { PASM->AddMethodImpl(yypvt[-2].token,yypvt[-0].string,NULL,NULL,NULL,NULL); } break; case 354: #line 921 "asmparse.y" { PASM->AddMethodImpl(yypvt[-6].token,yypvt[-4].string, (yypvt[-3].int32==0 ? parser->MakeSig(yypvt[-8].int32,yypvt[-7].binstr,yypvt[-1].binstr) : parser->MakeSig(yypvt[-8].int32| IMAGE_CEE_CS_CALLCONV_GENERIC,yypvt[-7].binstr,yypvt[-1].binstr,yypvt[-3].int32)) ,NULL,NULL,NULL); PASM->ResetArgNameList(); } break; case 356: #line 928 "asmparse.y" { if((yypvt[-1].int32 > 0) && (yypvt[-1].int32 <= (int)PASM->m_pCurMethod->m_NumTyPars)) PASM->m_pCustomDescrList = PASM->m_pCurMethod->m_TyPars[yypvt[-1].int32-1].CAList(); else PASM->report->error("Type parameter index out of range\n"); } break; case 357: #line 933 "asmparse.y" { int n = PASM->m_pCurMethod->FindTyPar(yypvt[-0].string); if(n >= 0) PASM->m_pCustomDescrList = PASM->m_pCurMethod->m_TyPars[n].CAList(); else PASM->report->error("Type parameter '%s' undefined\n",yypvt[-0].string); } break; case 358: #line 939 "asmparse.y" { PASM->m_pCurMethod->AddGenericParamConstraint(yypvt[-3].int32, 0, yypvt[-0].token); } break; case 359: #line 940 "asmparse.y" { PASM->m_pCurMethod->AddGenericParamConstraint(0, yypvt[-2].string, yypvt[-0].token); } break; case 360: #line 943 "asmparse.y" { if( yypvt[-2].int32 ) { ARG_NAME_LIST* pAN=PASM->findArg(PASM->m_pCurMethod->m_firstArgName, yypvt[-2].int32 - 1); if(pAN) { PASM->m_pCustomDescrList = &(pAN->CustDList); pAN->pValue = yypvt[-0].binstr; } else { PASM->m_pCustomDescrList = NULL; if(yypvt[-0].binstr) delete yypvt[-0].binstr; } } else { PASM->m_pCustomDescrList = &(PASM->m_pCurMethod->m_RetCustDList); PASM->m_pCurMethod->m_pRetValue = yypvt[-0].binstr; } PASM->m_tkCurrentCVOwner = 0; } break; case 361: #line 963 "asmparse.y" { PASM->m_pCurMethod->CloseScope(); } break; case 362: #line 966 "asmparse.y" { PASM->m_pCurMethod->OpenScope(); } break; case 366: #line 977 "asmparse.y" { PASM->m_SEHD->tryTo = PASM->m_CurPC; } break; case 367: #line 978 "asmparse.y" { PASM->SetTryLabels(yypvt[-2].string, yypvt[-0].string); } break; case 368: #line 979 "asmparse.y" { if(PASM->m_SEHD) {PASM->m_SEHD->tryFrom = yypvt[-2].int32; PASM->m_SEHD->tryTo = yypvt[-0].int32;} } break; case 369: #line 983 "asmparse.y" { PASM->NewSEHDescriptor(); PASM->m_SEHD->tryFrom = PASM->m_CurPC; } break; case 370: #line 988 "asmparse.y" { PASM->EmitTry(); } break; case 371: #line 989 "asmparse.y" { PASM->EmitTry(); } break; case 372: #line 990 "asmparse.y" { PASM->EmitTry(); } break; case 373: #line 991 "asmparse.y" { PASM->EmitTry(); } break; case 374: #line 995 "asmparse.y" { PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 375: #line 996 "asmparse.y" { PASM->SetFilterLabel(yypvt[-0].string); PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 376: #line 998 "asmparse.y" { PASM->m_SEHD->sehFilter = yypvt[-0].int32; PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 377: #line 1002 "asmparse.y" { 
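// SEH cases (366-383): try/filter/catch/finally/fault boundaries are captured in the current
// SEH descriptor (PASM->m_SEHD) as the guarded and handler blocks are parsed.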
PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_FILTER; PASM->m_SEHD->sehFilter = PASM->m_CurPC; } break; case 378: #line 1006 "asmparse.y" { PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_NONE; PASM->SetCatchClass(yypvt[-0].token); PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 379: #line 1011 "asmparse.y" { PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_FINALLY; PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 380: #line 1015 "asmparse.y" { PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_FAULT; PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 381: #line 1019 "asmparse.y" { PASM->m_SEHD->sehHandlerTo = PASM->m_CurPC; } break; case 382: #line 1020 "asmparse.y" { PASM->SetHandlerLabels(yypvt[-2].string, yypvt[-0].string); } break; case 383: #line 1021 "asmparse.y" { PASM->m_SEHD->sehHandler = yypvt[-2].int32; PASM->m_SEHD->sehHandlerTo = yypvt[-0].int32; } break; case 385: #line 1029 "asmparse.y" { PASM->EmitDataLabel(yypvt[-1].string); } break; case 387: #line 1033 "asmparse.y" { PASM->SetDataSection(); } break; case 388: #line 1034 "asmparse.y" { PASM->SetTLSSection(); } break; case 389: #line 1035 "asmparse.y" { PASM->SetILSection(); } break; case 394: #line 1046 "asmparse.y" { yyval.int32 = 1; } break; case 395: #line 1047 "asmparse.y" { yyval.int32 = yypvt[-1].int32; if(yypvt[-1].int32 <= 0) { PASM->report->error("Illegal item count: %d\n",yypvt[-1].int32); if(!PASM->OnErrGo) yyval.int32 = 1; }} break; case 396: #line 1052 "asmparse.y" { PASM->EmitDataString(yypvt[-1].binstr); } break; case 397: #line 1053 "asmparse.y" { PASM->EmitDD(yypvt[-1].string); } break; case 398: #line 1054 "asmparse.y" { PASM->EmitData(yypvt[-1].binstr->ptr(),yypvt[-1].binstr->length()); } break; case 399: #line 1056 "asmparse.y" { float f = (float) (*yypvt[-2].float64); float* p = new (nothrow) float[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i < yypvt[-0].int32; i++) p[i] = f; PASM->EmitData(p, sizeof(float)*yypvt[-0].int32); delete yypvt[-2].float64; delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(float)*yypvt[-0].int32); } break; case 400: #line 1063 "asmparse.y" { double* p = new (nothrow) double[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i<yypvt[-0].int32; i++) p[i] = *(yypvt[-2].float64); PASM->EmitData(p, sizeof(double)*yypvt[-0].int32); delete yypvt[-2].float64; delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(double)*yypvt[-0].int32); } break; case 401: #line 1070 "asmparse.y" { __int64* p = new (nothrow) __int64[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i<yypvt[-0].int32; i++) p[i] = *(yypvt[-2].int64); PASM->EmitData(p, sizeof(__int64)*yypvt[-0].int32); delete yypvt[-2].int64; delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int64)*yypvt[-0].int32); } break; case 402: #line 1077 "asmparse.y" { __int32* p = new (nothrow) __int32[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i<yypvt[-0].int32; i++) p[i] = yypvt[-2].int32; PASM->EmitData(p, sizeof(__int32)*yypvt[-0].int32); delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int32)*yypvt[-0].int32); } break; case 403: #line 1084 "asmparse.y" { __int16 i = (__int16) yypvt[-2].int32; FAIL_UNLESS(i == yypvt[-2].int32, ("Value %d too big\n", yypvt[-2].int32)); __int16* p = new (nothrow) __int16[yypvt[-0].int32]; if(p != NULL) { for(int j=0; j<yypvt[-0].int32; j++) p[j] = i; PASM->EmitData(p, sizeof(__int16)*yypvt[-0].int32); 
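// Replicated data items (cases 399-404): a temporary buffer holds the repeated value and is
// freed as soon as PASM->EmitData has copied it into the data section.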
delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int16)*yypvt[-0].int32); } break; case 404: #line 1092 "asmparse.y" { __int8 i = (__int8) yypvt[-2].int32; FAIL_UNLESS(i == yypvt[-2].int32, ("Value %d too big\n", yypvt[-2].int32)); __int8* p = new (nothrow) __int8[yypvt[-0].int32]; if(p != NULL) { for(int j=0; j<yypvt[-0].int32; j++) p[j] = i; PASM->EmitData(p, sizeof(__int8)*yypvt[-0].int32); delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int8)*yypvt[-0].int32); } break; case 405: #line 1099 "asmparse.y" { PASM->EmitData(NULL, sizeof(float)*yypvt[-0].int32); } break; case 406: #line 1100 "asmparse.y" { PASM->EmitData(NULL, sizeof(double)*yypvt[-0].int32); } break; case 407: #line 1101 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int64)*yypvt[-0].int32); } break; case 408: #line 1102 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int32)*yypvt[-0].int32); } break; case 409: #line 1103 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int16)*yypvt[-0].int32); } break; case 410: #line 1104 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int8)*yypvt[-0].int32); } break; case 411: #line 1108 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R4); float f = (float)(*yypvt[-1].float64); yyval.binstr->appendInt32(*((__int32*)&f)); delete yypvt[-1].float64; } break; case 412: #line 1111 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].float64); delete yypvt[-1].float64; } break; case 413: #line 1113 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 414: #line 1115 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 415: #line 1117 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 416: #line 1119 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 417: #line 1121 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I2); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 418: #line 1123 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I1); yyval.binstr->appendInt8(yypvt[-1].int32); } break; case 419: #line 1125 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 420: #line 1127 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 421: #line 1129 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 422: #line 1131 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); yyval.binstr->appendInt8(yypvt[-1].int32); } break; case 423: #line 1133 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 424: #line 1135 "asmparse.y" { yyval.binstr 
= new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 425: #line 1137 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 426: #line 1139 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); yyval.binstr->appendInt8(yypvt[-1].int32); } break; case 427: #line 1141 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_CHAR); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 428: #line 1143 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_BOOLEAN); yyval.binstr->appendInt8(yypvt[-1].int32);} break; case 429: #line 1145 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); yyval.binstr->append(yypvt[-1].binstr); delete yypvt[-1].binstr;} break; case 430: #line 1149 "asmparse.y" { bParsingByteArray = TRUE; } break; case 431: #line 1152 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 432: #line 1153 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 433: #line 1156 "asmparse.y" { __int8 i = (__int8) yypvt[-0].int32; yyval.binstr = new BinStr(); yyval.binstr->appendInt8(i); } break; case 434: #line 1157 "asmparse.y" { __int8 i = (__int8) yypvt[-0].int32; yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(i); } break; case 435: #line 1161 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 436: #line 1162 "asmparse.y" { yyval.binstr = BinStrToUnicode(yypvt[-0].binstr,true); yyval.binstr->insertInt8(ELEMENT_TYPE_STRING);} break; case 437: #line 1163 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_CLASS); yyval.binstr->appendInt32(0); } break; case 438: #line 1168 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 439: #line 1169 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); yyval.binstr->appendInt8(0xFF); } break; case 440: #line 1170 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); AppendStringWithLength(yyval.binstr,yypvt[-1].string); delete [] yypvt[-1].string;} break; case 441: #line 1172 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); AppendStringWithLength(yyval.binstr,yypvt[-1].string); delete [] yypvt[-1].string;} break; case 442: #line 1174 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); AppendStringWithLength(yyval.binstr,PASM->ReflectionNotation(yypvt[-1].token));} break; case 443: #line 1176 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); yyval.binstr->appendInt8(0xFF); } break; case 444: #line 1177 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(SERIALIZATION_TYPE_TAGGED_OBJECT);} break; case 445: #line 1179 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_R4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 446: #line 1183 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_R8); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 447: #line 1187 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I8); 
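// insertInt8/insertInt32 prepend to the blob, so these serialized arrays end up ordered as
// SZARRAY tag, element type, element count, then the element data appended earlier.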
yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 448: #line 1191 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 449: #line 1195 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I2); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 450: #line 1199 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I1); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 451: #line 1203 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U8); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 452: #line 1207 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 453: #line 1211 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U2); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 454: #line 1215 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U1); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 455: #line 1219 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U8); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 456: #line 1223 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 457: #line 1227 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U2); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 458: #line 1231 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U1); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 459: #line 1235 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_CHAR); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 460: #line 1239 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_BOOLEAN); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 461: #line 1243 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_STRING); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 462: #line 1247 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(SERIALIZATION_TYPE_TYPE); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 463: #line 1251 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(SERIALIZATION_TYPE_TAGGED_OBJECT); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 464: #line 1257 "asmparse.y" { yyval.binstr = 
new BinStr(); } break; case 465: #line 1258 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; float f = (float) (*yypvt[-0].float64); yyval.binstr->appendInt32(*((__int32*)&f)); delete yypvt[-0].float64; } break; case 466: #line 1260 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt32(yypvt[-0].int32); } break; case 467: #line 1264 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 468: #line 1265 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt64((__int64 *)yypvt[-0].float64); delete yypvt[-0].float64; } break; case 469: #line 1267 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt64((__int64 *)yypvt[-0].int64); delete yypvt[-0].int64; } break; case 470: #line 1271 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 471: #line 1272 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt64((__int64 *)yypvt[-0].int64); delete yypvt[-0].int64; } break; case 472: #line 1276 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 473: #line 1277 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt32(yypvt[-0].int32);} break; case 474: #line 1280 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 475: #line 1281 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt16(yypvt[-0].int32);} break; case 476: #line 1284 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 477: #line 1285 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(yypvt[-0].int32); } break; case 478: #line 1288 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 479: #line 1289 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(yypvt[-0].int32);} break; case 480: #line 1293 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 481: #line 1294 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(0xFF); } break; case 482: #line 1295 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; AppendStringWithLength(yyval.binstr,yypvt[-0].string); delete [] yypvt[-0].string;} break; case 483: #line 1299 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 484: #line 1300 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(0xFF); } break; case 485: #line 1301 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; AppendStringWithLength(yyval.binstr,yypvt[-0].string); delete [] yypvt[-0].string;} break; case 486: #line 1303 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; AppendStringWithLength(yyval.binstr,PASM->ReflectionNotation(yypvt[-0].token));} break; case 487: #line 1307 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 488: #line 1308 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 489: #line 1312 "asmparse.y" { parser->m_ANSFirst.PUSH(PASM->m_firstArgName); parser->m_ANSLast.PUSH(PASM->m_lastArgName); PASM->m_firstArgName = NULL; PASM->m_lastArgName = NULL; } break; case 490: #line 1318 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 491: #line 1321 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 492: #line 1324 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 493: #line 1327 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 494: #line 1330 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 495: #line 1333 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 496: #line 1336 
"asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); if((!PASM->OnErrGo)&& ((yypvt[-0].opcode == CEE_NEWOBJ)|| (yypvt[-0].opcode == CEE_CALLVIRT))) iCallConv = IMAGE_CEE_CS_CALLCONV_HASTHIS; } break; case 497: #line 1344 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 498: #line 1347 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 499: #line 1350 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 500: #line 1353 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 501: #line 1356 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); iOpcodeLen = PASM->OpcodeLen(yyval.instr); } break; case 502: #line 1359 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 503: #line 1362 "asmparse.y" { yyval.instr = yypvt[-1].instr; bParsingByteArray = TRUE; } break; case 504: #line 1366 "asmparse.y" { PASM->EmitOpcode(yypvt[-0].instr); } break; case 505: #line 1367 "asmparse.y" { PASM->EmitInstrVar(yypvt[-1].instr, yypvt[-0].int32); } break; case 506: #line 1368 "asmparse.y" { PASM->EmitInstrVarByName(yypvt[-1].instr, yypvt[-0].string); } break; case 507: #line 1369 "asmparse.y" { PASM->EmitInstrI(yypvt[-1].instr, yypvt[-0].int32); } break; case 508: #line 1370 "asmparse.y" { PASM->EmitInstrI8(yypvt[-1].instr, yypvt[-0].int64); } break; case 509: #line 1371 "asmparse.y" { PASM->EmitInstrR(yypvt[-1].instr, yypvt[-0].float64); delete (yypvt[-0].float64);} break; case 510: #line 1372 "asmparse.y" { double f = (double) (*yypvt[-0].int64); PASM->EmitInstrR(yypvt[-1].instr, &f); } break; case 511: #line 1373 "asmparse.y" { unsigned L = yypvt[-1].binstr->length(); FAIL_UNLESS(L >= sizeof(float), ("%d hexbytes, must be at least %d\n", L,sizeof(float))); if(L < sizeof(float)) {YYERROR; } else { double f = (L >= sizeof(double)) ? 
*((double *)(yypvt[-1].binstr->ptr())) : (double)(*(float *)(yypvt[-1].binstr->ptr())); PASM->EmitInstrR(yypvt[-2].instr,&f); } delete yypvt[-1].binstr; } break; case 512: #line 1382 "asmparse.y" { PASM->EmitInstrBrOffset(yypvt[-1].instr, yypvt[-0].int32); } break; case 513: #line 1383 "asmparse.y" { PASM->EmitInstrBrTarget(yypvt[-1].instr, yypvt[-0].string); } break; case 514: #line 1385 "asmparse.y" { PASM->SetMemberRefFixup(yypvt[-0].token,PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,yypvt[-0].token); PASM->m_tkCurrentCVOwner = yypvt[-0].token; PASM->m_pCustomDescrList = NULL; iCallConv = 0; } break; case 515: #line 1392 "asmparse.y" { yypvt[-3].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); mdToken mr = PASM->MakeMemberRef(yypvt[-2].token, yypvt[-0].string, yypvt[-3].binstr); PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-4].instr)); PASM->EmitInstrI(yypvt[-4].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 516: #line 1400 "asmparse.y" { yypvt[-1].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); mdToken mr = PASM->MakeMemberRef(mdTokenNil, yypvt[-0].string, yypvt[-1].binstr); PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-2].instr)); PASM->EmitInstrI(yypvt[-2].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 517: #line 1407 "asmparse.y" { mdToken mr = yypvt[-0].token; PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 518: #line 1413 "asmparse.y" { mdToken mr = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 519: #line 1419 "asmparse.y" { mdToken mr = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 520: #line 1425 "asmparse.y" { PASM->EmitInstrI(yypvt[-1].instr, yypvt[-0].token); PASM->m_tkCurrentCVOwner = yypvt[-0].token; PASM->m_pCustomDescrList = NULL; } break; case 521: #line 1429 "asmparse.y" { PASM->EmitInstrStringLiteral(yypvt[-1].instr, yypvt[-0].binstr,TRUE); } break; case 522: #line 1431 "asmparse.y" { PASM->EmitInstrStringLiteral(yypvt[-4].instr, yypvt[-1].binstr,FALSE); } break; case 523: #line 1433 "asmparse.y" { PASM->EmitInstrStringLiteral(yypvt[-3].instr, yypvt[-1].binstr,FALSE,TRUE); } break; case 524: #line 1435 "asmparse.y" { PASM->EmitInstrSig(yypvt[-5].instr, parser->MakeSig(yypvt[-4].int32, yypvt[-3].binstr, yypvt[-1].binstr)); PASM->ResetArgNameList(); } break; case 525: #line 1439 "asmparse.y" { PASM->EmitInstrI(yypvt[-1].instr,yypvt[-0].token); PASM->m_tkCurrentCVOwner = yypvt[-0].token; PASM->m_pCustomDescrList = NULL; iOpcodeLen = 0; } break; case 526: #line 1444 "asmparse.y" { PASM->EmitInstrSwitch(yypvt[-3].instr, yypvt[-1].labels); } break; case 527: #line 1447 "asmparse.y" { yyval.labels = 0; } break; case 528: #line 1448 "asmparse.y" { yyval.labels = new Labels(yypvt[-2].string, yypvt[-0].labels, TRUE); } break; case 529: #line 1449 "asmparse.y" { yyval.labels = new Labels((char *)(UINT_PTR)yypvt[-2].int32, yypvt[-0].labels, FALSE); } break; case 530: #line 1450 "asmparse.y" { yyval.labels = new Labels(yypvt[-0].string, NULL, TRUE); } break; case 531: #line 1451 "asmparse.y" { yyval.labels = new 
Labels((char *)(UINT_PTR)yypvt[-0].int32, NULL, FALSE); } break; case 532: #line 1455 "asmparse.y" { yyval.binstr = NULL; } break; case 533: #line 1456 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 534: #line 1459 "asmparse.y" { yyval.binstr = NULL; } break; case 535: #line 1460 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 536: #line 1463 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 537: #line 1464 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 538: #line 1468 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 539: #line 1469 "asmparse.y" { yyval.binstr = yypvt[-0].binstr;} break; case 540: #line 1472 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 541: #line 1473 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 542: #line 1476 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_SENTINEL); } break; case 543: #line 1477 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-1].binstr); PASM->addArgName(NULL, yypvt[-1].binstr, yypvt[-0].binstr, yypvt[-2].int32); } break; case 544: #line 1478 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-2].binstr); PASM->addArgName(yypvt[-0].string, yypvt[-2].binstr, yypvt[-1].binstr, yypvt[-3].int32);} break; case 545: #line 1482 "asmparse.y" { yyval.token = PASM->ResolveClassRef(PASM->GetAsmRef(yypvt[-2].string), yypvt[-0].string, NULL); delete[] yypvt[-2].string;} break; case 546: #line 1483 "asmparse.y" { yyval.token = PASM->ResolveClassRef(yypvt[-2].token, yypvt[-0].string, NULL); } break; case 547: #line 1484 "asmparse.y" { yyval.token = PASM->ResolveClassRef(mdTokenNil, yypvt[-0].string, NULL); } break; case 548: #line 1485 "asmparse.y" { yyval.token = PASM->ResolveClassRef(PASM->GetModRef(yypvt[-2].string),yypvt[-0].string, NULL); delete[] yypvt[-2].string;} break; case 549: #line 1486 "asmparse.y" { yyval.token = PASM->ResolveClassRef(1,yypvt[-0].string,NULL); } break; case 550: #line 1487 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 551: #line 1488 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; } break; case 552: #line 1489 "asmparse.y" { if(PASM->m_pCurClass != NULL) yyval.token = PASM->m_pCurClass->m_cl; else { yyval.token = 0; PASM->report->error(".this outside class scope\n"); } } break; case 553: #line 1492 "asmparse.y" { if(PASM->m_pCurClass != NULL) { yyval.token = PASM->m_pCurClass->m_crExtends; if(RidFromToken(yyval.token) == 0) PASM->report->error(".base undefined\n"); } else { yyval.token = 0; PASM->report->error(".base outside class scope\n"); } } break; case 554: #line 1498 "asmparse.y" { if(PASM->m_pCurClass != NULL) { if(PASM->m_pCurClass->m_pEncloser != NULL) yyval.token = PASM->m_pCurClass->m_pEncloser->m_cl; else { yyval.token = 0; PASM->report->error(".nester undefined\n"); } } else { yyval.token = 0; PASM->report->error(".nester outside class scope\n"); } } break; case 555: #line 1505 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 556: #line 1506 "asmparse.y" { yyval.string = newStringWDel(yypvt[-2].string, NESTING_SEP, yypvt[-0].string); } break; case 557: #line 1509 "asmparse.y" { yyval.token = yypvt[-0].token;} break; case 558: #line 1510 "asmparse.y" { yyval.token = PASM->GetAsmRef(yypvt[-1].string); delete[] yypvt[-1].string;} break; case 559: #line 1511 "asmparse.y" { yyval.token = 
PASM->GetModRef(yypvt[-1].string); delete[] yypvt[-1].string;} break; case 560: #line 1512 "asmparse.y" { yyval.token = PASM->ResolveTypeSpec(yypvt[-0].binstr); } break; case 561: #line 1516 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 562: #line 1518 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_CUSTOMMARSHALER); corEmitInt(yyval.binstr,yypvt[-7].binstr->length()); yyval.binstr->append(yypvt[-7].binstr); corEmitInt(yyval.binstr,yypvt[-5].binstr->length()); yyval.binstr->append(yypvt[-5].binstr); corEmitInt(yyval.binstr,yypvt[-3].binstr->length()); yyval.binstr->append(yypvt[-3].binstr); corEmitInt(yyval.binstr,yypvt[-1].binstr->length()); yyval.binstr->append(yypvt[-1].binstr); PASM->report->warn("Deprecated 4-string form of custom marshaler, first two strings ignored\n");} break; case 563: #line 1525 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_CUSTOMMARSHALER); corEmitInt(yyval.binstr,0); corEmitInt(yyval.binstr,0); corEmitInt(yyval.binstr,yypvt[-3].binstr->length()); yyval.binstr->append(yypvt[-3].binstr); corEmitInt(yyval.binstr,yypvt[-1].binstr->length()); yyval.binstr->append(yypvt[-1].binstr); } break; case 564: #line 1530 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_FIXEDSYSSTRING); corEmitInt(yyval.binstr,yypvt[-1].int32); } break; case 565: #line 1533 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_FIXEDARRAY); corEmitInt(yyval.binstr,yypvt[-2].int32); yyval.binstr->append(yypvt[-0].binstr); } break; case 566: #line 1535 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_VARIANT); PASM->report->warn("Deprecated native type 'variant'\n"); } break; case 567: #line 1537 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_CURRENCY); } break; case 568: #line 1538 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_SYSCHAR); PASM->report->warn("Deprecated native type 'syschar'\n"); } break; case 569: #line 1540 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_VOID); PASM->report->warn("Deprecated native type 'void'\n"); } break; case 570: #line 1542 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_BOOLEAN); } break; case 571: #line 1543 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I1); } break; case 572: #line 1544 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I2); } break; case 573: #line 1545 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I4); } break; case 574: #line 1546 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I8); } break; case 575: #line 1547 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_R4); } break; case 576: #line 1548 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_R8); } break; case 577: #line 1549 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_ERROR); } break; case 578: #line 1550 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U1); } break; case 579: #line 1551 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U2); } break; case 580: #line 1552 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U4); } break; case 581: #line 1553 
"asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U8); } break; case 582: #line 1554 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U1); } break; case 583: #line 1555 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U2); } break; case 584: #line 1556 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U4); } break; case 585: #line 1557 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U8); } break; case 586: #line 1558 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(NATIVE_TYPE_PTR); PASM->report->warn("Deprecated native type '*'\n"); } break; case 587: #line 1560 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); } break; case 588: #line 1562 "asmparse.y" { yyval.binstr = yypvt[-3].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); corEmitInt(yyval.binstr,0); corEmitInt(yyval.binstr,yypvt[-1].int32); corEmitInt(yyval.binstr,0); } break; case 589: #line 1567 "asmparse.y" { yyval.binstr = yypvt[-5].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); corEmitInt(yyval.binstr,yypvt[-1].int32); corEmitInt(yyval.binstr,yypvt[-3].int32); corEmitInt(yyval.binstr,ntaSizeParamIndexSpecified); } break; case 590: #line 1572 "asmparse.y" { yyval.binstr = yypvt[-4].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); corEmitInt(yyval.binstr,yypvt[-1].int32); } break; case 591: #line 1575 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_DECIMAL); PASM->report->warn("Deprecated native type 'decimal'\n"); } break; case 592: #line 1577 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_DATE); PASM->report->warn("Deprecated native type 'date'\n"); } break; case 593: #line 1579 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_BSTR); } break; case 594: #line 1580 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPSTR); } break; case 595: #line 1581 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPWSTR); } break; case 596: #line 1582 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPTSTR); } break; case 597: #line 1583 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_OBJECTREF); PASM->report->warn("Deprecated native type 'objectref'\n"); } break; case 598: #line 1585 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_IUNKNOWN); if(yypvt[-0].int32 != -1) corEmitInt(yyval.binstr,yypvt[-0].int32); } break; case 599: #line 1587 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_IDISPATCH); if(yypvt[-0].int32 != -1) corEmitInt(yyval.binstr,yypvt[-0].int32); } break; case 600: #line 1589 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_STRUCT); } break; case 601: #line 1590 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_INTF); if(yypvt[-0].int32 != -1) corEmitInt(yyval.binstr,yypvt[-0].int32); } break; case 602: #line 1592 "asmparse.y" { yyval.binstr = new 
BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_SAFEARRAY); corEmitInt(yyval.binstr,yypvt[-0].int32); corEmitInt(yyval.binstr,0);} break; case 603: #line 1595 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_SAFEARRAY); corEmitInt(yyval.binstr,yypvt[-2].int32); corEmitInt(yyval.binstr,yypvt[-0].binstr->length()); yyval.binstr->append(yypvt[-0].binstr); } break; case 604: #line 1599 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_INT); } break; case 605: #line 1600 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_UINT); } break; case 606: #line 1601 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_UINT); } break; case 607: #line 1602 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_NESTEDSTRUCT); PASM->report->warn("Deprecated native type 'nested struct'\n"); } break; case 608: #line 1604 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_BYVALSTR); } break; case 609: #line 1605 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_ANSIBSTR); } break; case 610: #line 1606 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_TBSTR); } break; case 611: #line 1607 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_VARIANTBOOL); } break; case 612: #line 1608 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_FUNC); } break; case 613: #line 1609 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_ASANY); } break; case 614: #line 1610 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPSTRUCT); } break; case 615: #line 1611 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-0].tdd->m_pbsTypeSpec); } break; case 616: #line 1614 "asmparse.y" { yyval.int32 = -1; } break; case 617: #line 1615 "asmparse.y" { yyval.int32 = yypvt[-1].int32; } break; case 618: #line 1618 "asmparse.y" { yyval.int32 = VT_EMPTY; } break; case 619: #line 1619 "asmparse.y" { yyval.int32 = VT_NULL; } break; case 620: #line 1620 "asmparse.y" { yyval.int32 = VT_VARIANT; } break; case 621: #line 1621 "asmparse.y" { yyval.int32 = VT_CY; } break; case 622: #line 1622 "asmparse.y" { yyval.int32 = VT_VOID; } break; case 623: #line 1623 "asmparse.y" { yyval.int32 = VT_BOOL; } break; case 624: #line 1624 "asmparse.y" { yyval.int32 = VT_I1; } break; case 625: #line 1625 "asmparse.y" { yyval.int32 = VT_I2; } break; case 626: #line 1626 "asmparse.y" { yyval.int32 = VT_I4; } break; case 627: #line 1627 "asmparse.y" { yyval.int32 = VT_I8; } break; case 628: #line 1628 "asmparse.y" { yyval.int32 = VT_R4; } break; case 629: #line 1629 "asmparse.y" { yyval.int32 = VT_R8; } break; case 630: #line 1630 "asmparse.y" { yyval.int32 = VT_UI1; } break; case 631: #line 1631 "asmparse.y" { yyval.int32 = VT_UI2; } break; case 632: #line 1632 "asmparse.y" { yyval.int32 = VT_UI4; } break; case 633: #line 1633 "asmparse.y" { yyval.int32 = VT_UI8; } break; case 634: #line 1634 "asmparse.y" { yyval.int32 = VT_UI1; } break; case 635: #line 1635 "asmparse.y" { yyval.int32 = VT_UI2; } break; case 636: #line 1636 "asmparse.y" { yyval.int32 = VT_UI4; } break; case 637: #line 1637 "asmparse.y" { yyval.int32 = VT_UI8; } break; case 638: #line 1638 "asmparse.y" { yyval.int32 = VT_PTR; } break; case 639: #line 1639 "asmparse.y" { yyval.int32 = yypvt[-2].int32 | VT_ARRAY; } break; case 
640: #line 1640 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | VT_VECTOR; } break; case 641: #line 1641 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | VT_BYREF; } break; case 642: #line 1642 "asmparse.y" { yyval.int32 = VT_DECIMAL; } break; case 643: #line 1643 "asmparse.y" { yyval.int32 = VT_DATE; } break; case 644: #line 1644 "asmparse.y" { yyval.int32 = VT_BSTR; } break; case 645: #line 1645 "asmparse.y" { yyval.int32 = VT_LPSTR; } break; case 646: #line 1646 "asmparse.y" { yyval.int32 = VT_LPWSTR; } break; case 647: #line 1647 "asmparse.y" { yyval.int32 = VT_UNKNOWN; } break; case 648: #line 1648 "asmparse.y" { yyval.int32 = VT_DISPATCH; } break; case 649: #line 1649 "asmparse.y" { yyval.int32 = VT_SAFEARRAY; } break; case 650: #line 1650 "asmparse.y" { yyval.int32 = VT_INT; } break; case 651: #line 1651 "asmparse.y" { yyval.int32 = VT_UINT; } break; case 652: #line 1652 "asmparse.y" { yyval.int32 = VT_UINT; } break; case 653: #line 1653 "asmparse.y" { yyval.int32 = VT_ERROR; } break; case 654: #line 1654 "asmparse.y" { yyval.int32 = VT_HRESULT; } break; case 655: #line 1655 "asmparse.y" { yyval.int32 = VT_CARRAY; } break; case 656: #line 1656 "asmparse.y" { yyval.int32 = VT_USERDEFINED; } break; case 657: #line 1657 "asmparse.y" { yyval.int32 = VT_RECORD; } break; case 658: #line 1658 "asmparse.y" { yyval.int32 = VT_FILETIME; } break; case 659: #line 1659 "asmparse.y" { yyval.int32 = VT_BLOB; } break; case 660: #line 1660 "asmparse.y" { yyval.int32 = VT_STREAM; } break; case 661: #line 1661 "asmparse.y" { yyval.int32 = VT_STORAGE; } break; case 662: #line 1662 "asmparse.y" { yyval.int32 = VT_STREAMED_OBJECT; } break; case 663: #line 1663 "asmparse.y" { yyval.int32 = VT_STORED_OBJECT; } break; case 664: #line 1664 "asmparse.y" { yyval.int32 = VT_BLOB_OBJECT; } break; case 665: #line 1665 "asmparse.y" { yyval.int32 = VT_CF; } break; case 666: #line 1666 "asmparse.y" { yyval.int32 = VT_CLSID; } break; case 667: #line 1670 "asmparse.y" { if(yypvt[-0].token == PASM->m_tkSysString) { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); } else if(yypvt[-0].token == PASM->m_tkSysObject) { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_OBJECT); } else yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_CLASS, yypvt[-0].token); } break; case 668: #line 1676 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_OBJECT); } break; case 669: #line 1677 "asmparse.y" { yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_VALUETYPE, yypvt[-0].token); } break; case 670: #line 1678 "asmparse.y" { yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_VALUETYPE, yypvt[-0].token); } break; case 671: #line 1679 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 672: #line 1680 "asmparse.y" { yyval.binstr = parser->MakeTypeArray(ELEMENT_TYPE_ARRAY, yypvt[-3].binstr, yypvt[-1].binstr); } break; case 673: #line 1681 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_BYREF); } break; case 674: #line 1682 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_PTR); } break; case 675: #line 1683 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_PINNED); } break; case 676: #line 1684 "asmparse.y" { yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_CMOD_REQD, yypvt[-1].token); yyval.binstr->append(yypvt[-4].binstr); } break; case 677: #line 1686 "asmparse.y" { yyval.binstr = 
parser->MakeTypeClass(ELEMENT_TYPE_CMOD_OPT, yypvt[-1].token); yyval.binstr->append(yypvt[-4].binstr); } break; case 678: #line 1689 "asmparse.y" { yyval.binstr = parser->MakeSig(yypvt[-5].int32, yypvt[-4].binstr, yypvt[-1].binstr); yyval.binstr->insertInt8(ELEMENT_TYPE_FNPTR); PASM->delArgNameList(PASM->m_firstArgName); PASM->m_firstArgName = parser->m_ANSFirst.POP(); PASM->m_lastArgName = parser->m_ANSLast.POP(); } break; case 679: #line 1695 "asmparse.y" { if(yypvt[-1].binstr == NULL) yyval.binstr = yypvt[-3].binstr; else { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_GENERICINST); yyval.binstr->append(yypvt[-3].binstr); corEmitInt(yyval.binstr, corCountArgs(yypvt[-1].binstr)); yyval.binstr->append(yypvt[-1].binstr); delete yypvt[-3].binstr; delete yypvt[-1].binstr; }} break; case 680: #line 1702 "asmparse.y" { //if(PASM->m_pCurMethod) { // if(($3 < 0)||((DWORD)$3 >= PASM->m_pCurMethod->m_NumTyPars)) // PASM->report->error("Invalid method type parameter '%d'\n",$3); yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_MVAR); corEmitInt(yyval.binstr, yypvt[-0].int32); //} else PASM->report->error("Method type parameter '%d' outside method scope\n",$3); } break; case 681: #line 1708 "asmparse.y" { //if(PASM->m_pCurClass) { // if(($2 < 0)||((DWORD)$2 >= PASM->m_pCurClass->m_NumTyPars)) // PASM->report->error("Invalid type parameter '%d'\n",$2); yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_VAR); corEmitInt(yyval.binstr, yypvt[-0].int32); //} else PASM->report->error("Type parameter '%d' outside class scope\n",$2); } break; case 682: #line 1714 "asmparse.y" { int eltype = ELEMENT_TYPE_MVAR; int n=-1; if(PASM->m_pCurMethod) n = PASM->m_pCurMethod->FindTyPar(yypvt[-0].string); else { if(PASM->m_TyParList) n = PASM->m_TyParList->IndexOf(yypvt[-0].string); if(n == -1) { n = TyParFixupList.COUNT(); TyParFixupList.PUSH(yypvt[-0].string); eltype = ELEMENT_TYPE_MVARFIXUP; } } if(n == -1) { PASM->report->error("Invalid method type parameter '%s'\n",yypvt[-0].string); n = 0x1FFFFFFF; } yyval.binstr = new BinStr(); yyval.binstr->appendInt8(eltype); corEmitInt(yyval.binstr,n); } break; case 683: #line 1729 "asmparse.y" { int eltype = ELEMENT_TYPE_VAR; int n=-1; if(PASM->m_pCurClass && !newclass) n = PASM->m_pCurClass->FindTyPar(yypvt[-0].string); else { if(PASM->m_TyParList) n = PASM->m_TyParList->IndexOf(yypvt[-0].string); if(n == -1) { n = TyParFixupList.COUNT(); TyParFixupList.PUSH(yypvt[-0].string); eltype = ELEMENT_TYPE_VARFIXUP; } } if(n == -1) { PASM->report->error("Invalid type parameter '%s'\n",yypvt[-0].string); n = 0x1FFFFFFF; } yyval.binstr = new BinStr(); yyval.binstr->appendInt8(eltype); corEmitInt(yyval.binstr,n); } break; case 684: #line 1744 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_TYPEDBYREF); } break; case 685: #line 1745 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_VOID); } break; case 686: #line 1746 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I); } break; case 687: #line 1747 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U); } break; case 688: #line 1748 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U); } break; case 689: #line 1749 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 690: #line 1750 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_SENTINEL); } break; case 691: #line 1753 
"asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_CHAR); } break; case 692: #line 1754 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); } break; case 693: #line 1755 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_BOOLEAN); } break; case 694: #line 1756 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I1); } break; case 695: #line 1757 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I2); } break; case 696: #line 1758 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I4); } break; case 697: #line 1759 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I8); } break; case 698: #line 1760 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R4); } break; case 699: #line 1761 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R8); } break; case 700: #line 1762 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); } break; case 701: #line 1763 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); } break; case 702: #line 1764 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); } break; case 703: #line 1765 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); } break; case 704: #line 1766 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); } break; case 705: #line 1767 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); } break; case 706: #line 1768 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); } break; case 707: #line 1769 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); } break; case 708: #line 1770 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-0].tdd->m_pbsTypeSpec); } break; case 709: #line 1773 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 710: #line 1774 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yypvt[-2].binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 711: #line 1777 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(0x7FFFFFFF); yyval.binstr->appendInt32(0x7FFFFFFF); } break; case 712: #line 1778 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(0x7FFFFFFF); yyval.binstr->appendInt32(0x7FFFFFFF); } break; case 713: #line 1779 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(0); yyval.binstr->appendInt32(yypvt[-0].int32); } break; case 714: #line 1780 "asmparse.y" { FAIL_UNLESS(yypvt[-2].int32 <= yypvt[-0].int32, ("lower bound %d must be <= upper bound %d\n", yypvt[-2].int32, yypvt[-0].int32)); if (yypvt[-2].int32 > yypvt[-0].int32) { YYERROR; }; yyval.binstr = new BinStr(); yyval.binstr->appendInt32(yypvt[-2].int32); yyval.binstr->appendInt32(yypvt[-0].int32-yypvt[-2].int32+1); } break; case 715: #line 1783 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(yypvt[-1].int32); yyval.binstr->appendInt32(0x7FFFFFFF); } break; case 716: #line 1788 "asmparse.y" { PASM->AddPermissionDecl(yypvt[-4].secAct, yypvt[-3].token, yypvt[-1].pair); } break; case 717: #line 1790 "asmparse.y" { PASM->AddPermissionDecl(yypvt[-5].secAct, yypvt[-4].token, yypvt[-1].binstr); } break; case 
718: #line 1791 "asmparse.y" { PASM->AddPermissionDecl(yypvt[-1].secAct, yypvt[-0].token, (NVPair *)NULL); } break; case 719: #line 1792 "asmparse.y" { PASM->AddPermissionSetDecl(yypvt[-2].secAct, yypvt[-1].binstr); } break; case 720: #line 1794 "asmparse.y" { PASM->AddPermissionSetDecl(yypvt[-1].secAct,BinStrToUnicode(yypvt[-0].binstr,true));} break; case 721: #line 1796 "asmparse.y" { BinStr* ret = new BinStr(); ret->insertInt8('.'); corEmitInt(ret, nSecAttrBlobs); ret->append(yypvt[-1].binstr); PASM->AddPermissionSetDecl(yypvt[-4].secAct,ret); nSecAttrBlobs = 0; } break; case 722: #line 1804 "asmparse.y" { yyval.binstr = new BinStr(); nSecAttrBlobs = 0;} break; case 723: #line 1805 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; nSecAttrBlobs = 1; } break; case 724: #line 1806 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); nSecAttrBlobs++; } break; case 725: #line 1810 "asmparse.y" { yyval.binstr = PASM->EncodeSecAttr(PASM->ReflectionNotation(yypvt[-4].token),yypvt[-1].binstr,nCustomBlobNVPairs); nCustomBlobNVPairs = 0; } break; case 726: #line 1813 "asmparse.y" { yyval.binstr = PASM->EncodeSecAttr(yypvt[-4].string,yypvt[-1].binstr,nCustomBlobNVPairs); nCustomBlobNVPairs = 0; } break; case 727: #line 1817 "asmparse.y" { yyval.secAct = yypvt[-2].secAct; bParsingByteArray = TRUE; } break; case 728: #line 1819 "asmparse.y" { yyval.secAct = yypvt[-2].secAct; bParsingByteArray = TRUE; } break; case 729: #line 1822 "asmparse.y" { yyval.pair = yypvt[-0].pair; } break; case 730: #line 1823 "asmparse.y" { yyval.pair = yypvt[-2].pair->Concat(yypvt[-0].pair); } break; case 731: #line 1826 "asmparse.y" { yypvt[-2].binstr->appendInt8(0); yyval.pair = new NVPair(yypvt[-2].binstr, yypvt[-0].binstr); } break; case 732: #line 1829 "asmparse.y" { yyval.int32 = 1; } break; case 733: #line 1830 "asmparse.y" { yyval.int32 = 0; } break; case 734: #line 1833 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_BOOLEAN); yyval.binstr->appendInt8(yypvt[-0].int32); } break; case 735: #line 1836 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_I4); yyval.binstr->appendInt32(yypvt[-0].int32); } break; case 736: #line 1839 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_I4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 737: #line 1842 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_STRING); yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; yyval.binstr->appendInt8(0); } break; case 738: #line 1846 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-5].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); yyval.binstr->appendInt8(1); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 739: #line 1852 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-5].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); yyval.binstr->appendInt8(2); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 740: #line 1858 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-5].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); 
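// Enum-typed custom attribute arguments (cases 738-741): the byte following the serialized
// type name encodes the enum's underlying size — 1, 2, or 4 bytes.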
yyval.binstr->appendInt8(4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 741: #line 1864 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-3].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); yyval.binstr->appendInt8(4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 742: #line 1872 "asmparse.y" { yyval.secAct = dclRequest; } break; case 743: #line 1873 "asmparse.y" { yyval.secAct = dclDemand; } break; case 744: #line 1874 "asmparse.y" { yyval.secAct = dclAssert; } break; case 745: #line 1875 "asmparse.y" { yyval.secAct = dclDeny; } break; case 746: #line 1876 "asmparse.y" { yyval.secAct = dclPermitOnly; } break; case 747: #line 1877 "asmparse.y" { yyval.secAct = dclLinktimeCheck; } break; case 748: #line 1878 "asmparse.y" { yyval.secAct = dclInheritanceCheck; } break; case 749: #line 1879 "asmparse.y" { yyval.secAct = dclRequestMinimum; } break; case 750: #line 1880 "asmparse.y" { yyval.secAct = dclRequestOptional; } break; case 751: #line 1881 "asmparse.y" { yyval.secAct = dclRequestRefuse; } break; case 752: #line 1882 "asmparse.y" { yyval.secAct = dclPrejitGrant; } break; case 753: #line 1883 "asmparse.y" { yyval.secAct = dclPrejitDenied; } break; case 754: #line 1884 "asmparse.y" { yyval.secAct = dclNonCasDemand; } break; case 755: #line 1885 "asmparse.y" { yyval.secAct = dclNonCasLinkDemand; } break; case 756: #line 1886 "asmparse.y" { yyval.secAct = dclNonCasInheritance; } break; case 757: #line 1890 "asmparse.y" { PASM->ResetLineNumbers(); nCurrPC = PASM->m_CurPC; PENV->bExternSource = TRUE; PENV->bExternSourceAutoincrement = FALSE; } break; case 758: #line 1891 "asmparse.y" { PASM->ResetLineNumbers(); nCurrPC = PASM->m_CurPC; PENV->bExternSource = TRUE; PENV->bExternSourceAutoincrement = TRUE; } break; case 759: #line 1894 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-1].int32; PENV->nExtCol = 0; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].string);} break; case 760: #line 1897 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-0].int32; PENV->nExtCol = 0; PENV->nExtColEnd = static_cast<unsigned>(-1); } break; case 761: #line 1899 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-3].int32; PENV->nExtCol=yypvt[-1].int32; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].string);} break; case 762: #line 1902 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-2].int32; PENV->nExtCol=yypvt[-0].int32; PENV->nExtColEnd = static_cast<unsigned>(-1);} break; case 763: #line 1905 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-5].int32; PENV->nExtCol=yypvt[-3].int32; PENV->nExtColEnd = yypvt[-1].int32; PASM->SetSourceFileName(yypvt[-0].string);} break; case 764: #line 1909 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-4].int32; PENV->nExtCol=yypvt[-2].int32; PENV->nExtColEnd = yypvt[-0].int32; } break; case 765: #line 1912 "asmparse.y" { PENV->nExtLine = yypvt[-5].int32; PENV->nExtLineEnd = yypvt[-3].int32; PENV->nExtCol=yypvt[-1].int32; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].string);} break; case 766: #line 1916 "asmparse.y" { PENV->nExtLine = yypvt[-4].int32; PENV->nExtLineEnd = yypvt[-2].int32; PENV->nExtCol=yypvt[-0].int32; PENV->nExtColEnd = static_cast<unsigned>(-1); } break; case 767: #line 1919 "asmparse.y" { PENV->nExtLine = yypvt[-7].int32; PENV->nExtLineEnd = 
yypvt[-5].int32; PENV->nExtCol=yypvt[-3].int32; PENV->nExtColEnd = yypvt[-1].int32; PASM->SetSourceFileName(yypvt[-0].string);} break; case 768: #line 1923 "asmparse.y" { PENV->nExtLine = yypvt[-6].int32; PENV->nExtLineEnd = yypvt[-4].int32; PENV->nExtCol=yypvt[-2].int32; PENV->nExtColEnd = yypvt[-0].int32; } break; case 769: #line 1925 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-1].int32 - 1; PENV->nExtCol = 0; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].binstr);} break; case 770: #line 1932 "asmparse.y" { PASMM->AddFile(yypvt[-5].string, yypvt[-6].fileAttr|yypvt[-4].fileAttr|yypvt[-0].fileAttr, yypvt[-2].binstr); } break; case 771: #line 1933 "asmparse.y" { PASMM->AddFile(yypvt[-1].string, yypvt[-2].fileAttr|yypvt[-0].fileAttr, NULL); } break; case 772: #line 1936 "asmparse.y" { yyval.fileAttr = (CorFileFlags) 0; } break; case 773: #line 1937 "asmparse.y" { yyval.fileAttr = (CorFileFlags) (yypvt[-1].fileAttr | ffContainsNoMetaData); } break; case 774: #line 1940 "asmparse.y" { yyval.fileAttr = (CorFileFlags) 0; } break; case 775: #line 1941 "asmparse.y" { yyval.fileAttr = (CorFileFlags) 0x80000000; } break; case 776: #line 1944 "asmparse.y" { bParsingByteArray = TRUE; } break; case 777: #line 1947 "asmparse.y" { PASMM->StartAssembly(yypvt[-0].string, NULL, (DWORD)yypvt[-1].asmAttr, FALSE); } break; case 778: #line 1950 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) 0; } break; case 779: #line 1951 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) (yypvt[-1].asmAttr | afRetargetable); } break; case 780: #line 1952 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) (yypvt[-1].asmAttr | afContentType_WindowsRuntime); } break; case 781: #line 1953 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) (yypvt[-1].asmAttr | afPA_NoPlatform); } break; case 782: #line 1954 "asmparse.y" { yyval.asmAttr = yypvt[-2].asmAttr; } break; case 783: #line 1955 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_MSIL); } break; case 784: #line 1956 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_x86); } break; case 785: #line 1957 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_AMD64); } break; case 786: #line 1958 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_ARM); } break; case 787: #line 1959 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_ARM64); } break; case 790: #line 1966 "asmparse.y" { PASMM->SetAssemblyHashAlg(yypvt[-0].int32); } break; case 793: #line 1971 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 794: #line 1972 "asmparse.y" { yyval.int32 = 0xFFFF; } break; case 795: #line 1975 "asmparse.y" { PASMM->SetAssemblyPublicKey(yypvt[-1].binstr); } break; case 796: #line 1977 "asmparse.y" { PASMM->SetAssemblyVer((USHORT)yypvt[-6].int32, (USHORT)yypvt[-4].int32, (USHORT)yypvt[-2].int32, (USHORT)yypvt[-0].int32); } break; case 797: #line 1978 "asmparse.y" { yypvt[-0].binstr->appendInt8(0); PASMM->SetAssemblyLocale(yypvt[-0].binstr,TRUE); } break; case 798: #line 1979 "asmparse.y" { PASMM->SetAssemblyLocale(yypvt[-1].binstr,FALSE); } break; case 801: #line 1984 "asmparse.y" { bParsingByteArray = TRUE; } break; case 802: #line 1987 "asmparse.y" { bParsingByteArray = TRUE; } break; case 803: #line 1990 "asmparse.y" { bParsingByteArray = TRUE; } break; case 804: #line 1994 "asmparse.y" { PASMM->StartAssembly(yypvt[-0].string, NULL, yypvt[-1].asmAttr, TRUE); } break; case 805: #line 1996 "asmparse.y" { PASMM->StartAssembly(yypvt[-2].string, yypvt[-0].string, yypvt[-3].asmAttr, TRUE); } 
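/* The remaining reduce actions (cases 808-841) feed the assembler's
 * manifest emitter, PASMM: assembly hash and public-key-token blobs,
 * exported-type declarations with their CorTypeAttr visibility flags,
 * and manifest-resource declarations with their
 * CorManifestResourceFlags.  Throughout these actions, yypvt[-0] is
 * the semantic value of the rightmost symbol of the rule being
 * reduced, yypvt[-1] the symbol before it, and so on, while yyval
 * receives the value produced for the rule's left-hand side. */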
break;
case 808:
#line 2003 "asmparse.y"
{ PASMM->SetAssemblyHashBlob(yypvt[-1].binstr); } break;
case 810:
#line 2005 "asmparse.y"
{ PASMM->SetAssemblyPublicKeyToken(yypvt[-1].binstr); } break;
case 811:
#line 2006 "asmparse.y"
{ PASMM->SetAssemblyAutodetect(); } break;
case 812:
#line 2009 "asmparse.y"
{ PASMM->StartComType(yypvt[-0].string, yypvt[-1].exptAttr);} break;
case 813:
#line 2012 "asmparse.y"
{ PASMM->StartComType(yypvt[-0].string, yypvt[-1].exptAttr); } break;
case 814:
#line 2015 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) 0; } break;
case 815:
#line 2016 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-1].exptAttr | tdNotPublic); } break;
case 816:
#line 2017 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-1].exptAttr | tdPublic); } break;
case 817:
#line 2018 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-1].exptAttr | tdForwarder); } break;
case 818:
#line 2019 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedPublic); } break;
case 819:
#line 2020 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedPrivate); } break;
case 820:
#line 2021 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedFamily); } break;
case 821:
#line 2022 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedAssembly); } break;
case 822:
#line 2023 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedFamANDAssem); } break;
case 823:
#line 2024 "asmparse.y"
{ yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedFamORAssem); } break;
case 826:
#line 2031 "asmparse.y"
{ PASMM->SetComTypeFile(yypvt[-0].string); } break;
case 827:
#line 2032 "asmparse.y"
{ PASMM->SetComTypeComType(yypvt[-0].string); } break;
case 828:
#line 2033 "asmparse.y"
{ PASMM->SetComTypeAsmRef(yypvt[-0].string); } break;
case 829:
#line 2034 "asmparse.y"
{ if(!PASMM->SetComTypeImplementationTok(yypvt[-1].int32))
    PASM->report->error("Invalid implementation of exported type\n"); } break;
case 830:
#line 2036 "asmparse.y"
{ if(!PASMM->SetComTypeClassTok(yypvt[-0].int32))
    PASM->report->error("Invalid TypeDefID of exported type\n"); } break;
case 833:
#line 2042 "asmparse.y"
{ PASMM->StartManifestRes(yypvt[-0].string, yypvt[-0].string, yypvt[-1].manresAttr); } break;
case 834:
#line 2044 "asmparse.y"
{ PASMM->StartManifestRes(yypvt[-2].string, yypvt[-0].string, yypvt[-3].manresAttr); } break;
case 835:
#line 2047 "asmparse.y"
{ yyval.manresAttr = (CorManifestResourceFlags) 0; } break;
case 836:
#line 2048 "asmparse.y"
{ yyval.manresAttr = (CorManifestResourceFlags) (yypvt[-1].manresAttr | mrPublic); } break;
case 837:
#line 2049 "asmparse.y"
{ yyval.manresAttr = (CorManifestResourceFlags) (yypvt[-1].manresAttr | mrPrivate); } break;
case 840:
#line 2056 "asmparse.y"
{ PASMM->SetManifestResFile(yypvt[-2].string, (ULONG)yypvt[-0].int32); } break;
case 841:
#line 2057 "asmparse.y"
{ PASMM->SetManifestResAsmRef(yypvt[-0].string); } break;
/* End of actions */
#line 329 "F:\\NetFXDev1\\src\\tools\\devdiv\\amd64\\yypars.c"
        }
    }
    goto yystack;  /* stack new state and value */
}
#pragma warning(default:102)

#ifdef YYDUMP
YYLOCAL void YYNEAR YYPASCAL yydumpinfo(void)
{
    short stackindex;
    short valindex;

    //dump yys
    printf("short yys[%d] {\n", YYMAXDEPTH);
    for (stackindex = 0; stackindex < YYMAXDEPTH; stackindex++){
        if (stackindex)
            printf(", %s", stackindex % 10 ? "\0" : "\n");
        printf("%6d", yys[stackindex]);
    }
    printf("\n};\n");

    //dump yyv
    printf("YYSTYPE yyv[%d] {\n", YYMAXDEPTH);
    for (valindex = 0; valindex < YYMAXDEPTH; valindex++){
        if (valindex)
            printf(", %s", valindex % 5 ? "\0" : "\n");
        printf("%#*x", 3+sizeof(YYSTYPE), yyv[valindex]);
    }
    printf("\n};\n");
}
#endif
/* * Created by Microsoft VCBU Internal YACC from "asmparse.y" */ #line 2 "asmparse.y" // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File asmparse.y // #include "ilasmpch.h" #include "grammar_before.cpp" #line 16 "asmparse.y" #define UNION 1 typedef union { CorRegTypeAttr classAttr; CorMethodAttr methAttr; CorFieldAttr fieldAttr; CorMethodImpl implAttr; CorEventAttr eventAttr; CorPropertyAttr propAttr; CorPinvokeMap pinvAttr; CorDeclSecurity secAct; CorFileFlags fileAttr; CorAssemblyFlags asmAttr; CorAssemblyFlags asmRefAttr; CorTypeAttr exptAttr; CorManifestResourceFlags manresAttr; double* float64; __int64* int64; __int32 int32; char* string; BinStr* binstr; Labels* labels; Instr* instr; // instruction opcode NVPair* pair; pTyParList typarlist; mdToken token; TypeDefDescr* tdd; CustomDescr* cad; unsigned short opcode; } YYSTYPE; # define ERROR_ 257 # define BAD_COMMENT_ 258 # define BAD_LITERAL_ 259 # define ID 260 # define DOTTEDNAME 261 # define QSTRING 262 # define SQSTRING 263 # define INT32 264 # define INT64 265 # define FLOAT64 266 # define HEXBYTE 267 # define TYPEDEF_T 268 # define TYPEDEF_M 269 # define TYPEDEF_F 270 # define TYPEDEF_TS 271 # define TYPEDEF_MR 272 # define TYPEDEF_CA 273 # define DCOLON 274 # define ELIPSIS 275 # define VOID_ 276 # define BOOL_ 277 # define CHAR_ 278 # define UNSIGNED_ 279 # define INT_ 280 # define INT8_ 281 # define INT16_ 282 # define INT32_ 283 # define INT64_ 284 # define FLOAT_ 285 # define FLOAT32_ 286 # define FLOAT64_ 287 # define BYTEARRAY_ 288 # define UINT_ 289 # define UINT8_ 290 # define UINT16_ 291 # define UINT32_ 292 # define UINT64_ 293 # define FLAGS_ 294 # define CALLCONV_ 295 # define MDTOKEN_ 296 # define OBJECT_ 297 # define STRING_ 298 # define NULLREF_ 299 # define DEFAULT_ 300 # define CDECL_ 301 # define VARARG_ 302 # define STDCALL_ 303 # define THISCALL_ 304 # define FASTCALL_ 305 # define CLASS_ 306 # define TYPEDREF_ 307 # define UNMANAGED_ 308 # define FINALLY_ 309 # define HANDLER_ 310 # define CATCH_ 311 # define FILTER_ 312 # define FAULT_ 313 # define EXTENDS_ 314 # define IMPLEMENTS_ 315 # define TO_ 316 # define AT_ 317 # define TLS_ 318 # define TRUE_ 319 # define FALSE_ 320 # define _INTERFACEIMPL 321 # define VALUE_ 322 # define VALUETYPE_ 323 # define NATIVE_ 324 # define INSTANCE_ 325 # define SPECIALNAME_ 326 # define FORWARDER_ 327 # define STATIC_ 328 # define PUBLIC_ 329 # define PRIVATE_ 330 # define FAMILY_ 331 # define FINAL_ 332 # define SYNCHRONIZED_ 333 # define INTERFACE_ 334 # define SEALED_ 335 # define NESTED_ 336 # define ABSTRACT_ 337 # define AUTO_ 338 # define SEQUENTIAL_ 339 # define EXPLICIT_ 340 # define ANSI_ 341 # define UNICODE_ 342 # define AUTOCHAR_ 343 # define IMPORT_ 344 # define ENUM_ 345 # define VIRTUAL_ 346 # define NOINLINING_ 347 # define AGGRESSIVEINLINING_ 348 # define NOOPTIMIZATION_ 349 # define AGGRESSIVEOPTIMIZATION_ 350 # define UNMANAGEDEXP_ 351 # define BEFOREFIELDINIT_ 352 # define STRICT_ 353 # define RETARGETABLE_ 354 # define WINDOWSRUNTIME_ 355 # define NOPLATFORM_ 356 # define METHOD_ 357 # define FIELD_ 358 # define PINNED_ 359 # define MODREQ_ 360 # define MODOPT_ 361 # define SERIALIZABLE_ 362 # define PROPERTY_ 363 # define TYPE_ 364 # define ASSEMBLY_ 365 # define FAMANDASSEM_ 366 # define FAMORASSEM_ 367 # define PRIVATESCOPE_ 368 # define HIDEBYSIG_ 369 # define NEWSLOT_ 370 # define RTSPECIALNAME_ 371 # define PINVOKEIMPL_ 372 # define _CTOR 373 # 
define _CCTOR 374 # define LITERAL_ 375 # define NOTSERIALIZED_ 376 # define INITONLY_ 377 # define REQSECOBJ_ 378 # define CIL_ 379 # define OPTIL_ 380 # define MANAGED_ 381 # define FORWARDREF_ 382 # define PRESERVESIG_ 383 # define RUNTIME_ 384 # define INTERNALCALL_ 385 # define _IMPORT 386 # define NOMANGLE_ 387 # define LASTERR_ 388 # define WINAPI_ 389 # define AS_ 390 # define BESTFIT_ 391 # define ON_ 392 # define OFF_ 393 # define CHARMAPERROR_ 394 # define INSTR_NONE 395 # define INSTR_VAR 396 # define INSTR_I 397 # define INSTR_I8 398 # define INSTR_R 399 # define INSTR_BRTARGET 400 # define INSTR_METHOD 401 # define INSTR_FIELD 402 # define INSTR_TYPE 403 # define INSTR_STRING 404 # define INSTR_SIG 405 # define INSTR_TOK 406 # define INSTR_SWITCH 407 # define _CLASS 408 # define _NAMESPACE 409 # define _METHOD 410 # define _FIELD 411 # define _DATA 412 # define _THIS 413 # define _BASE 414 # define _NESTER 415 # define _EMITBYTE 416 # define _TRY 417 # define _MAXSTACK 418 # define _LOCALS 419 # define _ENTRYPOINT 420 # define _ZEROINIT 421 # define _EVENT 422 # define _ADDON 423 # define _REMOVEON 424 # define _FIRE 425 # define _OTHER 426 # define _PROPERTY 427 # define _SET 428 # define _GET 429 # define _PERMISSION 430 # define _PERMISSIONSET 431 # define REQUEST_ 432 # define DEMAND_ 433 # define ASSERT_ 434 # define DENY_ 435 # define PERMITONLY_ 436 # define LINKCHECK_ 437 # define INHERITCHECK_ 438 # define REQMIN_ 439 # define REQOPT_ 440 # define REQREFUSE_ 441 # define PREJITGRANT_ 442 # define PREJITDENY_ 443 # define NONCASDEMAND_ 444 # define NONCASLINKDEMAND_ 445 # define NONCASINHERITANCE_ 446 # define _LINE 447 # define P_LINE 448 # define _LANGUAGE 449 # define _CUSTOM 450 # define INIT_ 451 # define _SIZE 452 # define _PACK 453 # define _VTABLE 454 # define _VTFIXUP 455 # define FROMUNMANAGED_ 456 # define CALLMOSTDERIVED_ 457 # define _VTENTRY 458 # define RETAINAPPDOMAIN_ 459 # define _FILE 460 # define NOMETADATA_ 461 # define _HASH 462 # define _ASSEMBLY 463 # define _PUBLICKEY 464 # define _PUBLICKEYTOKEN 465 # define ALGORITHM_ 466 # define _VER 467 # define _LOCALE 468 # define EXTERN_ 469 # define _MRESOURCE 470 # define _MODULE 471 # define _EXPORT 472 # define LEGACY_ 473 # define LIBRARY_ 474 # define X86_ 475 # define AMD64_ 476 # define ARM_ 477 # define ARM64_ 478 # define MARSHAL_ 479 # define CUSTOM_ 480 # define SYSSTRING_ 481 # define FIXED_ 482 # define VARIANT_ 483 # define CURRENCY_ 484 # define SYSCHAR_ 485 # define DECIMAL_ 486 # define DATE_ 487 # define BSTR_ 488 # define TBSTR_ 489 # define LPSTR_ 490 # define LPWSTR_ 491 # define LPTSTR_ 492 # define OBJECTREF_ 493 # define IUNKNOWN_ 494 # define IDISPATCH_ 495 # define STRUCT_ 496 # define SAFEARRAY_ 497 # define BYVALSTR_ 498 # define LPVOID_ 499 # define ANY_ 500 # define ARRAY_ 501 # define LPSTRUCT_ 502 # define IIDPARAM_ 503 # define IN_ 504 # define OUT_ 505 # define OPT_ 506 # define _PARAM 507 # define _OVERRIDE 508 # define WITH_ 509 # define NULL_ 510 # define HRESULT_ 511 # define CARRAY_ 512 # define USERDEFINED_ 513 # define RECORD_ 514 # define FILETIME_ 515 # define BLOB_ 516 # define STREAM_ 517 # define STORAGE_ 518 # define STREAMED_OBJECT_ 519 # define STORED_OBJECT_ 520 # define BLOB_OBJECT_ 521 # define CF_ 522 # define CLSID_ 523 # define VECTOR_ 524 # define _SUBSYSTEM 525 # define _CORFLAGS 526 # define ALIGNMENT_ 527 # define _IMAGEBASE 528 # define _STACKRESERVE 529 # define _TYPEDEF 530 # define _TEMPLATE 531 # define _TYPELIST 532 # define _MSCORLIB 
533 # define P_DEFINE 534 # define P_UNDEF 535 # define P_IFDEF 536 # define P_IFNDEF 537 # define P_ELSE 538 # define P_ENDIF 539 # define P_INCLUDE 540 # define CONSTRAINT_ 541 #define yyclearin yychar = -1 #define yyerrok yyerrflag = 0 #ifndef YYMAXDEPTH #define YYMAXDEPTH 150 #endif YYSTYPE yylval, yyval; #ifndef YYFARDATA #define YYFARDATA /*nothing*/ #endif #if ! defined YYSTATIC #define YYSTATIC /*nothing*/ #endif #if ! defined YYCONST #define YYCONST /*nothing*/ #endif #ifndef YYACT #define YYACT yyact #endif #ifndef YYPACT #define YYPACT yypact #endif #ifndef YYPGO #define YYPGO yypgo #endif #ifndef YYR1 #define YYR1 yyr1 #endif #ifndef YYR2 #define YYR2 yyr2 #endif #ifndef YYCHK #define YYCHK yychk #endif #ifndef YYDEF #define YYDEF yydef #endif #ifndef YYV #define YYV yyv #endif #ifndef YYS #define YYS yys #endif #ifndef YYLOCAL #define YYLOCAL #endif #ifndef YYR_T #define YYR_T int #endif typedef YYR_T yyr_t; #ifndef YYEXIND_T #define YYEXIND_T unsigned int #endif typedef YYEXIND_T yyexind_t; #ifndef YYOPTTIME #define YYOPTTIME 0 #endif # define YYERRCODE 256 #line 2062 "asmparse.y" #include "grammar_after.cpp" YYSTATIC YYCONST short yyexca[] = { #if !(YYOPTTIME) -1, 1, #endif 0, -1, -2, 0, #if !(YYOPTTIME) -1, 452, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 622, #endif 274, 555, 47, 555, -2, 230, #if !(YYOPTTIME) -1, 643, #endif 40, 310, 60, 310, -2, 555, #if !(YYOPTTIME) -1, 665, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 690, #endif 274, 555, 47, 555, -2, 516, #if !(YYOPTTIME) -1, 809, #endif 123, 235, -2, 555, #if !(YYOPTTIME) -1, 836, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 961, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 994, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 995, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1323, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1324, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1331, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1339, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1465, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1497, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1564, #endif 41, 538, -2, 311, #if !(YYOPTTIME) -1, 1581, #endif 41, 538, -2, 311, }; # define YYNPROD 844 #if YYOPTTIME YYSTATIC YYCONST yyexind_t yyexcaind[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 54, 0, 0, 0, 0, 0, 0, 58, 0, 0, 0, 0, 0, 0, 0, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 78 }; #endif # define YYLAST 3922 YYSTATIC YYCONST short YYFARDATA YYACT[] = { 703, 1484, 
414, 1416, 1133, 640, 660, 191, 1482, 1485, 886, 1036, 971, 1483, 702, 788, 779, 885, 974, 729, 73, 75, 150, 625, 1521, 536, 1417, 792, 755, 190, 760, 757, 478, 176, 107, 972, 1146, 110, 106, 694, 1077, 275, 860, 604, 662, 599, 273, 78, 81, 219, 44, 24, 780, 262, 516, 214, 204, 7, 301, 188, 654, 76, 6, 991, 85, 5, 1569, 3, 1206, 1253, 220, 1125, 18, 1257, 115, 677, 1069, 264, 153, 1254, 307, 133, 74, 178, 179, 180, 181, 272, 1123, 218, 136, 10, 221, 300, 26, 137, 1070, 98, 278, 139, 217, 1124, 17, 202, 203, 581, 269, 74, 719, 716, 322, 265, 461, 113, 112, 700, 520, 939, 940, 352, 343, 88, 87, 353, 89, 462, 1255, 452, 1025, 268, 676, 338, 56, 537, 68, 1243, 1244, 305, 591, 88, 87, 357, 89, 277, 225, 327, 277, 368, 339, 1031, 342, 938, 361, 366, 185, 154, 1241, 1242, 98, 360, 359, 358, 277, 56, 88, 87, 345, 89, 656, 1573, 192, 782, 351, 783, 348, 365, 1537, 277, 86, 1039, 369, 310, 312, 314, 316, 318, 374, 277, 362, 198, 1038, 364, 699, 373, 199, 271, 200, 698, 1432, 84, 105, 379, 201, 417, 418, 363, 376, 1138, 1139, 450, 387, 451, 615, 88, 87, 186, 89, 388, 480, 195, 814, 1071, 663, 1505, 456, 767, 1279, 457, 1578, 999, 258, 473, 475, 416, 196, 1496, 484, 655, 481, 482, 470, 491, 468, 472, 471, 501, 346, 216, 495, 1329, 597, 1214, 192, 493, 441, 24, 833, 476, 479, 375, 433, 7, 1278, 56, 801, 813, 486, 432, 664, 74, 428, 492, 429, 483, 541, 436, 18, 641, 642, 487, 586, 376, 585, 584, 544, 56, 1354, 1355, 1356, 587, 941, 942, 267, 943, 435, 10, 154, 442, 26, 1337, 1336, 1335, 1334, 494, 791, 434, 17, 777, 542, 572, 74, 545, 714, 668, 575, 268, 576, 1249, 577, 1517, 499, 600, 862, 863, 864, 579, 580, 371, 370, 1009, 116, 511, 511, 528, 534, 108, 574, 372, 258, 571, 266, 549, 573, 192, 488, 367, 88, 87, 154, 89, 321, 1559, 601, 512, 512, 529, 535, 80, 79, 480, 198, 505, 152, 410, 1435, 199, 74, 200, 613, 1560, 348, 582, 583, 201, 74, 1130, 480, 46, 498, 481, 482, 375, 621, 88, 596, 459, 89, 624, 80, 79, 195, 1563, 607, 608, 609, 610, 481, 482, 1562, 606, 420, 614, 421, 422, 423, 196, 745, 1368, 612, 474, 611, 619, 620, 622, 485, 340, 341, 678, 1352, 639, 644, 74, 1561, 500, 88, 87, 1248, 89, 1128, 955, 56, 1153, 600, 595, 784, 135, 1154, 759, 649, 650, 354, 355, 356, 652, 635, 1507, 1536, 348, 884, 182, 643, 785, 56, 704, 1138, 1139, 666, 1408, 855, 321, 177, 88, 674, 1140, 89, 74, 669, 1531, 970, 1514, 685, 682, 951, 347, 566, 88, 1246, 46, 89, 74, 538, 1506, 1267, 569, 1528, 867, 1362, 747, 88, 87, 689, 89, 74, 671, 673, 1192, 1358, 1142, 696, 88, 87, 786, 89, 990, 989, 988, 588, 983, 982, 981, 980, 758, 978, 979, 105, 706, 987, 986, 985, 984, 537, 690, 1468, 977, 975, 651, 715, 1001, 1002, 1003, 1004, 88, 87, 692, 954, 177, 376, 693, 453, 155, 648, 705, 80, 79, 480, 683, 701, 546, 727, 707, 805, 61, 62, 47, 63, 709, 803, 710, 713, 1256, 861, 539, 460, 645, 481, 482, 718, 177, 1520, 647, 56, 1530, 88, 87, 225, 89, 74, 723, 1191, 659, 728, 646, 967, 730, 277, 1529, 724, 597, 725, 675, 976, 720, 80, 79, 1129, 762, 679, 680, 681, 413, 734, 82, 506, 1526, 56, 768, 769, 49, 50, 51, 52, 53, 54, 55, 74, 639, 1262, 1258, 1259, 1260, 1261, 74, 754, 1524, 601, 744, 733, 748, 749, 750, 1013, 98, 1011, 1012, 787, 543, 502, 72, 49, 50, 51, 52, 53, 54, 55, 74, 643, 74, 684, 1522, 477, 61, 62, 47, 63, 88, 87, 542, 89, 807, 808, 802, 71, 751, 752, 753, 812, 793, 821, 74, 514, 825, 819, 826, 822, 74, 695, 216, 70, 830, 1184, 1183, 1182, 1181, 156, 157, 158, 831, 804, 806, 74, 809, 74, 815, 480, 773, 774, 775, 790, 325, 841, 842, 823, 797, 69, 800, 818, 80, 79, 67, 377, 824, 832, 324, 481, 482, 348, 348, 854, 
88, 87, 225, 89, 834, 858, 88, 87, 865, 89, 1153, 375, 672, 66, 930, 1154, 627, 628, 629, 49, 50, 51, 52, 53, 54, 55, 192, 944, 945, 868, 56, 853, 1153, 277, 856, 88, 87, 1154, 89, 74, 857, 49, 50, 51, 52, 53, 54, 55, 601, 957, 600, 1457, 630, 631, 632, 950, 1076, 1072, 1073, 1074, 1075, 1455, 946, 152, 1344, 46, 382, 383, 384, 385, 111, 177, 80, 79, 852, 74, 88, 87, 993, 89, 859, 348, 773, 88, 87, 1021, 89, 1022, 1019, 74, 1453, 362, 963, 956, 960, 966, 1018, 1451, 932, 46, 933, 934, 935, 936, 937, 216, 823, 74, 1032, 1434, 593, 1035, 637, 276, 1343, 968, 823, 606, 766, 997, 696, 696, 496, 1026, 1044, 1020, 1425, 74, 441, 1007, 1016, 1424, 1422, 1407, 433, 1027, 962, 1049, 829, 1024, 1047, 432, 1029, 1028, 428, 517, 429, 1051, 1042, 436, 1006, 1014, 528, 74, 1405, 80, 79, 480, 840, 1045, 1046, 1395, 145, 973, 519, 1411, 670, 765, 435, 1005, 1015, 442, 1008, 1017, 529, 823, 1062, 481, 482, 434, 1057, 88, 87, 337, 89, 1393, 49, 50, 51, 52, 53, 54, 55, 592, 277, 636, 277, 1391, 326, 323, 56, 152, 1389, 1067, 1387, 1385, 277, 1383, 49, 50, 51, 52, 53, 54, 55, 1381, 1379, 1376, 1373, 962, 1371, 1367, 41, 43, 1351, 1327, 1209, 1208, 1056, 996, 1055, 1134, 88, 87, 1315, 89, 762, 1079, 1054, 1080, 155, 776, 63, 722, 46, 1053, 543, 1034, 1033, 1144, 828, 1150, 1252, 616, 504, 820, 513, 737, 1131, 508, 509, 618, 1136, 617, 1141, 578, 522, 527, 177, 565, 1065, 1251, 308, 455, 1313, 109, 63, 1137, 1197, 92, 1145, 1198, 1195, 1196, 964, 1316, 770, 1037, 520, 1311, 513, 521, 1349, 508, 509, 1309, 1187, 1143, 695, 695, 992, 1194, 1193, 145, 1207, 1190, 953, 1210, 1185, 1, 1418, 1189, 1199, 1200, 1201, 1202, 1179, 1177, 1175, 1066, 589, 1234, 1203, 1204, 1205, 1314, 49, 50, 51, 52, 53, 54, 55, 712, 348, 88, 87, 1217, 89, 626, 1312, 590, 1211, 1245, 1063, 1152, 1310, 1188, 1247, 152, 1218, 1239, 1173, 1171, 1169, 145, 1238, 1237, 1240, 1186, 49, 50, 51, 52, 53, 54, 55, 1180, 1178, 1176, 88, 87, 348, 89, 74, 1167, 1165, 205, 155, 525, 352, 1250, 711, 708, 353, 156, 157, 158, 1163, 192, 192, 192, 192, 1135, 634, 277, 1161, 277, 1127, 192, 192, 192, 357, 1174, 1172, 1170, 177, 591, 412, 378, 1433, 626, 1263, 192, 46, 1159, 187, 794, 97, 88, 87, 63, 89, 1157, 1430, 949, 1168, 1166, 1508, 1138, 1139, 524, 1155, 351, 526, 317, 1266, 527, 1280, 1164, 1284, 315, 1286, 1288, 1289, 1270, 1292, 1162, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1274, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1269, 1429, 313, 1160, 1317, 1318, 1291, 1320, 1293, 1428, 1319, 1158, 1287, 1285, 1290, 152, 1301, 311, 1419, 1236, 1156, 309, 1282, 308, 1060, 1322, 1212, 306, 1213, 308, 1059, 844, 1328, 746, 667, 1233, 1333, 352, 1332, 45, 94, 353, 49, 50, 51, 52, 53, 54, 55, 454, 328, 329, 330, 308, 415, 88, 87, 277, 89, 357, 156, 157, 158, 155, 591, 1558, 823, 1345, 308, 1338, 1347, 1348, 308, 352, 1331, 332, 1330, 353, 308, 56, 277, 152, 1272, 1353, 1357, 1147, 525, 277, 140, 1215, 351, 177, 952, 1360, 1283, 357, 948, 1023, 1359, 277, 827, 1281, 277, 138, 1350, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 1470, 839, 591, 1469, 838, 1361, 351, 817, 63, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 591, 1346, 524, 771, 764, 526, 1364, 258, 721, 796, 567, 117, 1409, 1410, 349, 1414, 1396, 1413, 352, 1571, 1415, 591, 772, 1043, 591, 591, 56, 1421, 1420, 931, 1583, 591, 1426, 331, 303, 333, 334, 335, 336, 1574, 357, 1152, 1572, 1565, 1541, 823, 1504, 1503, 88, 87, 1502, 89, 1412, 1472, 1467, 134, 155, 1464, 1341, 96, 1423, 1461, 104, 103, 102, 101, 1460, 99, 100, 105, 1456, 351, 156, 157, 158, 1454, 49, 50, 51, 52, 53, 
54, 55, 1452, 177, 1450, 1437, 1509, 1431, 1427, 258, 1581, 697, 1406, 1510, 1404, 1394, 1462, 1392, 1463, 1390, 1388, 354, 355, 356, 97, 352, 1386, 836, 56, 353, 1473, 1474, 1475, 88, 87, 1466, 89, 1471, 1384, 531, 1382, 155, 88, 87, 1380, 89, 1378, 357, 1377, 1375, 1374, 1488, 350, 1152, 1372, 1370, 1486, 56, 1369, 1366, 1489, 1494, 1487, 88, 1365, 1566, 89, 1342, 1340, 177, 1476, 1326, 1325, 1499, 1321, 1271, 46, 1498, 351, 1363, 1268, 1235, 1516, 56, 1126, 1523, 1525, 1527, 1064, 1523, 1525, 1527, 258, 1050, 206, 1534, 1525, 1048, 1041, 1040, 1532, 1538, 1501, 1539, 1535, 1540, 1533, 1519, 1030, 969, 959, 958, 947, 866, 1515, 1518, 1513, 849, 848, 846, 156, 157, 158, 449, 843, 193, 1511, 837, 194, 835, 816, 823, 795, 778, 742, 1523, 1525, 1527, 741, 740, 739, 354, 355, 356, 738, 736, 88, 735, 688, 89, 638, 198, 177, 570, 425, 424, 199, 344, 200, 46, 320, 1564, 1497, 1493, 201, 1492, 1491, 1490, 1570, 1465, 1459, 1458, 1568, 1449, 1448, 1447, 1446, 354, 355, 356, 1577, 195, 1575, 1445, 1580, 1579, 156, 157, 158, 1582, 1444, 1567, 1443, 1442, 1441, 1440, 196, 1439, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 1438, 1436, 823, 1315, 59, 523, 1313, 1576, 208, 259, 210, 228, 212, 213, 1311, 1309, 1339, 56, 88, 882, 1187, 89, 41, 43, 56, 1185, 876, 1179, 877, 878, 879, 46, 1177, 1175, 1173, 1171, 515, 1169, 1167, 61, 62, 47, 63, 1165, 1163, 1161, 1324, 354, 355, 356, 223, 1323, 1265, 96, 1264, 56, 104, 103, 102, 101, 46, 99, 100, 105, 222, 1078, 871, 872, 873, 1068, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 998, 1061, 1052, 46, 59, 995, 994, 426, 208, 259, 210, 228, 212, 213, 961, 851, 226, 224, 1058, 850, 847, 845, 41, 43, 732, 731, 717, 691, 687, 870, 874, 875, 686, 880, 665, 633, 881, 603, 530, 61, 62, 47, 63, 49, 50, 51, 52, 53, 54, 55, 223, 602, 354, 355, 356, 594, 568, 548, 547, 497, 419, 411, 386, 319, 222, 304, 518, 302, 510, 507, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 503, 36, 184, 93, 59, 33, 469, 467, 208, 259, 210, 228, 212, 213, 466, 465, 226, 224, 95, 464, 244, 463, 41, 43, 227, 243, 215, 209, 1098, 38, 30, 58, 32, 59, 207, 211, 887, 31, 1010, 61, 62, 47, 63, 49, 50, 51, 52, 53, 54, 55, 223, 41, 43, 1000, 439, 38, 30, 58, 32, 59, 869, 799, 431, 798, 222, 46, 430, 427, 61, 62, 47, 63, 46, 540, 270, 60, 35, 41, 43, 83, 29, 21, 57, 34, 37, 25, 16, 263, 15, 189, 14, 39, 40, 261, 61, 62, 47, 63, 13, 226, 224, 60, 35, 46, 260, 12, 11, 21, 9, 8, 37, 4, 2, 444, 234, 242, 241, 39, 40, 240, 444, 239, 238, 237, 236, 235, 49, 50, 51, 52, 53, 54, 55, 233, 232, 231, 230, 229, 114, 77, 42, 756, 658, 657, 1500, 299, 19, 20, 90, 22, 23, 48, 183, 27, 28, 49, 50, 51, 52, 53, 54, 55, 1151, 761, 789, 1273, 965, 1149, 1148, 605, 1479, 1478, 19, 20, 1477, 22, 23, 48, 1495, 27, 28, 49, 50, 51, 52, 53, 54, 55, 882, 1481, 1480, 1216, 1132, 598, 661, 876, 781, 877, 878, 879, 448, 91, 58, 32, 59, 1081, 743, 448, 65, 58, 32, 59, 64, 197, 445, 883, 0, 0, 0, 446, 0, 445, 41, 43, 929, 0, 446, 0, 0, 41, 43, 0, 0, 0, 0, 871, 872, 873, 0, 61, 62, 47, 63, 1109, 437, 438, 61, 62, 47, 63, 0, 437, 438, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1085, 1086, 447, 1093, 1107, 1087, 1088, 1089, 1090, 447, 1091, 1092, 0, 1108, 1094, 1095, 1096, 1097, 63, 870, 874, 875, 0, 880, 0, 0, 881, 0, 532, 0, 0, 533, 0, 0, 0, 0, 0, 443, 440, 0, 0, 0, 0, 0, 443, 440, 0, 0, 0, 0, 0, 882, 0, 0, 0, 0, 193, 0, 876, 194, 877, 878, 879, 0, 49, 50, 51, 52, 53, 54, 55, 49, 50, 51, 52, 53, 54, 55, 0, 0, 0, 0, 198, 177, 0, 0, 0, 199, 0, 200, 0, 0, 0, 0, 0, 201, 901, 0, 871, 872, 873, 0, 49, 
50, 51, 52, 53, 54, 55, 146, 928, 0, 195, 0, 0, 893, 894, 0, 902, 919, 895, 896, 897, 898, 0, 899, 900, 196, 920, 903, 904, 905, 906, 0, 0, 0, 0, 348, 0, 0, 0, 0, 0, 0, 870, 874, 875, 0, 880, 0, 0, 881, 1232, 1231, 1226, 0, 1225, 1224, 1223, 1222, 0, 1220, 1221, 105, 0, 1230, 1229, 1228, 1227, 0, 0, 0, 0, 917, 1219, 921, 0, 990, 989, 988, 923, 983, 982, 981, 980, 0, 978, 979, 105, 0, 987, 986, 985, 984, 0, 0, 925, 977, 975, 0, 1542, 0, 0, 0, 0, 0, 0, 1083, 1084, 0, 1099, 1100, 1101, 0, 1102, 1103, 0, 0, 1104, 1105, 0, 1106, 0, 0, 0, 0, 0, 0, 0, 926, 0, 0, 0, 0, 1082, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 901, 0, 0, 976, 0, 0, 1512, 0, 0, 0, 0, 0, 0, 0, 928, 0, 0, 0, 0, 893, 894, 0, 902, 919, 895, 896, 897, 898, 0, 899, 900, 0, 920, 903, 904, 905, 906, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, 888, 0, 889, 890, 891, 892, 907, 908, 909, 924, 910, 911, 912, 913, 914, 915, 916, 918, 922, 917, 0, 921, 927, 0, 0, 0, 923, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 925, 168, 169, 0, 0, 171, 172, 173, 174, 564, 1557, 152, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 1547, 0, 0, 0, 0, 0, 0, 0, 0, 926, 0, 0, 0, 143, 144, 149, 1543, 556, 0, 550, 551, 552, 553, 0, 0, 1552, 0, 0, 146, 0, 0, 0, 0, 352, 0, 0, 0, 772, 0, 1553, 1554, 1555, 1556, 0, 0, 0, 0, 0, 160, 0, 0, 0, 0, 0, 0, 357, 558, 559, 560, 561, 0, 0, 555, 0, 0, 0, 562, 563, 554, 0, 0, 1544, 1545, 1546, 1548, 1549, 1550, 1551, 0, 0, 0, 0, 0, 0, 0, 0, 623, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 888, 0, 889, 890, 891, 892, 907, 908, 909, 924, 910, 911, 912, 913, 914, 915, 916, 918, 922, 990, 989, 988, 927, 983, 982, 981, 980, 0, 978, 979, 105, 0, 987, 986, 985, 984, 0, 0, 0, 977, 975, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 557, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 146, 0, 177, 142, 162, 352, 0, 0, 0, 353, 0, 0, 141, 147, 0, 976, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 357, 143, 144, 149, 0, 0, 175, 0, 0, 0, 0, 0, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 623, 1276, 162, 0, 0, 160, 159, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 1277, 0, 0, 0, 141, 147, 0, 0, 0, 0, 297, 198, 156, 157, 158, 0, 199, 0, 200, 1275, 143, 144, 149, 0, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, 352, 0, 195, 284, 353, 279, 280, 281, 282, 283, 63, 0, 0, 0, 287, 0, 160, 196, 354, 355, 356, 0, 357, 285, 0, 0, 0, 0, 295, 0, 286, 0, 641, 642, 0, 0, 0, 0, 0, 0, 0, 0, 288, 289, 290, 291, 292, 293, 294, 298, 0, 0, 0, 623, 0, 296, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 157, 158, 0, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 146, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 0, 354, 355, 356, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, 642, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 156, 157, 158, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 146, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 88, 87, 160, 89, 354, 355, 356, 0, 155, 146, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 157, 158, 0, 143, 144, 149, 0, 811, 274, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 146, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 160, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 810, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, 156, 157, 158, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 159, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 274, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 156, 157, 158, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 160, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 763, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 156, 157, 158, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 160, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 146, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 88, 87, 0, 89, 0, 0, 0, 0, 155, 0, 0, 175, 156, 157, 158, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 653, 146, 171, 172, 173, 174, 0, 0, 177, 142, 162, 88, 87, 0, 89, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 0, 409, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 157, 158, 996, 0, 0, 146, 0, 0, 0, 0, 0, 0, 458, 0, 0, 0, 391, 0, 0, 0, 407, 0, 0, 389, 390, 0, 0, 0, 393, 394, 405, 395, 396, 397, 398, 399, 400, 401, 402, 392, 0, 0, 0, 0, 0, 0, 406, 0, 0, 404, 0, 0, 0, 0, 0, 0, 403, 0, 0, 146, 0, 0, 0, 726, 0, 408, 0, 0, 156, 157, 158, 489, 175, 490, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 177, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 380, 175, 381, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 0, 142, 162, 0, 0, 0, 160, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 0, 142, 162, 0, 0, 0, 160, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 160, 142, 162, 0, 0, 0, 0, 0, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 175, 0, 0, 0, 151, 148, 163, 161, 170, 0, 164, 165, 166, 167, 0, 168, 169, 0, 0, 171, 172, 173, 174, 0, 0, 0, 142, 162, 0, 0, 0, 0, 160, 0, 0, 141, 147, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 144, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160 }; YYSTATIC YYCONST short YYFARDATA YYPACT[] = { -1000, 1423,-1000, 609, 586,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 581, 555, 539, 514,-1000,-1000,-1000, 102, 
102, -466, 124, 124,-1000,-1000,-1000, 478,-1000, -115, 535,-1000, 907, 1099, 68, 903, 102, -355, -356,-1000, -139, 855, 68, 855,-1000,-1000,-1000, 172, 2319, 535, 535, 535, 535,-1000,-1000, 187,-1000,-1000,-1000, -164, 1074,-1000,-1000, 1825, 68, 68,-1000,-1000, 1368,-1000, -1000,-1000,-1000,-1000,-1000,-1000, 102, -121,-1000,-1000, -1000,-1000, 691, -120, 2983, 1193,-1000,-1000,-1000,-1000, 2436,-1000, 102,-1000, 1385,-1000, 1310, 1718, 68, 1169, 1163, 1159, 1144, 1120, 1114, 1716, 1518, 83,-1000, 102, 655, 878,-1000,-1000, 86, 1193, 535, 2983,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000, 1515, 185, 1288, 1061, -229, -230, -231, -238, 691,-1000, -101, 691, 1255, 312,-1000,-1000, 48, -1000, 3564, 239, 1081,-1000,-1000,-1000,-1000,-1000, 3394, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 504,-1000,-1000,-1000,-1000,-1000, 1193, 1715, 538, 1193, 1193, 1193,-1000, 3232, 123,-1000,-1000, 1714, 1066, 2884, -1000, 3564,-1000,-1000,-1000, 65, 65,-1000, 1713,-1000, -1000, 99, 1513, 1512, 1575, 1397,-1000,-1000, 102,-1000, 102, 87,-1000,-1000,-1000,-1000, 1173,-1000,-1000,-1000, -1000,-1000, 901, 102, 3193,-1000, 21, -69,-1000,-1000, 201, 102, 124, 610, 68, 201, 1255, 3339, 2983, -88, 65, 2884, 1712,-1000, 215,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 511, 545, 860, 1606,-1000, 100,-1000, 355, 691,-1000, -1000, 2983,-1000,-1000, 164, 1217, 65, 535,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000, 1711, 1710, 2114, 895, 349, 1284, 1709, 123, 1511, -48,-1000, 102, -48, -1000, 124,-1000, 102,-1000, 102,-1000, 102,-1000,-1000, -1000,-1000, 891,-1000, 102, 102,-1000, 1193,-1000,-1000, -1000, -369,-1000,-1000,-1000,-1000,-1000, 878, -47, 116, -1000,-1000, 1193, 999,-1000, 1299, 789, 1708,-1000, 170, 535, 157,-1000,-1000,-1000, 1704, 1690, 3564, 535, 535, 535, 535,-1000, 691,-1000,-1000, 3564, 228,-1000, 1193, -1000, -68,-1000, 1217, 879, 889, 887, 535, 535, 2721, -1000,-1000,-1000,-1000,-1000,-1000, 102, 1299, 1070,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000, 406,-1000,-1000,-1000, 1688, 1052,-1000, 791, 1508,-1000,-1000, 2580,-1000,-1000, 102, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 458, 446, 417,-1000,-1000,-1000,-1000,-1000, 102, 102, 402, 3124,-1000,-1000, -304, -196,-1000,-1000,-1000,-1000,-1000, -1000,-1000, -53, 1687,-1000, 102, 1158, 39, 65, 794, 640, 102,-1000, -69, 107, 107, 107, 107, 2983, 215, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000, 1685, 1681, 1506,-1000,-1000,-1000, 2721,-1000,-1000, -1000,-1000, 1299, 1680, 68, 3564,-1000, 201, 1285,-1000, -119, -124,-1000,-1000, -351,-1000,-1000, 68, 411, 454, 68,-1000,-1000, 1041,-1000,-1000, 68,-1000, 68,-1000, 1040, 991,-1000,-1000, 535, -157, -360, 1679,-1000,-1000, -1000,-1000, 535, -361,-1000,-1000, -346,-1000,-1000,-1000, 1282,-1000, 869, 535, 3564, 1193, 3510, 102, 108, 1181, -1000,-1000,-1000,-1000,-1000,-1000,-1000, 1678,-1000,-1000, -1000,-1000,-1000,-1000, 1677,-1000,-1000, 1385, 108, 1505, -1000, 1503, 883, 1502, 1498, 1497, 1496, 1492,-1000, 362, 1157,-1000, 97, 1193,-1000,-1000,-1000, 298, 535, 108, 388, 175, 3052,-1000,-1000, 1278, 1193,-1000, 793,-1000, -1000, -50, 2983, 2983, 943, 1277, 1217, 1193, 1193, 1193, 1193,-1000, 2418,-1000, 1193,-1000, 535, 535, 535, 867, 1193, 33, 1193, 494, 1491,-1000, 128,-1000,-1000,-1000, -1000,-1000,-1000, 102,-1000, 
1299,-1000,-1000, 1255, 30, 1076,-1000,-1000, 1193, 1490, 1202,-1000,-1000,-1000,-1000, -1000,-1000, -10, 65, 465, 459, 2983, 2816, -106, -47, 1488, 1265,-1000,-1000, 3510, -53, 881, 102, -96, 3564, 102, 1193, 102, 1238, 876,-1000,-1000,-1000, 201,-1000, -1000,-1000,-1000,-1000,-1000,-1000, 102, 124,-1000, -18, 1193, 108, 1487, 1386, 1485, 1262, 1259,-1000, 123, 102, 102, 1482, 1155,-1000,-1000, 1299, 1674, 1477, 1673, 1476, 1475, 1672, 1668, 1193, 535,-1000, 535, 102, 141, 535, 68, 2983, 535, 706, 1298, 81, -182, 1471, 95, 1795, 131, 1877, 102,-1000, 1306,-1000, 900,-1000, 900, 900, 900, 900, 900, -166,-1000, 102, 102, 535,-1000,-1000, -1000,-1000,-1000,-1000, 1193, 1470, 1234, 1083,-1000,-1000, 347, 1230, 964, 271, 166,-1000, 46, 102, 1469, 1468, -1000, 3564, 1667, 1081, 1081, 1081, 535, 535,-1000, 941, 542, 128,-1000,-1000,-1000,-1000,-1000, 1467, 343, 226, 958, -96, 1659, 1658, 3449,-1000,-1000, 1568, 104, 204, 690, -96, 3564, 102, 1193, 102, 1235, -322, 535, 1193, -1000,-1000, 3564,-1000,-1000, 1193,-1000, -53, 81, 1466, -241,-1000,-1000, 1193, 2721, 874, 873, 2983, 945, -126, -137, 1457, 1456, 535, 1300,-1000, -53,-1000, 201, 201, -1000,-1000,-1000,-1000, 411,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 1081, 1193, 1455, 102, 1193, 1451,-1000, 535, -96, 1655, 871, 864, 856, 854,-1000, 108, 1670,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 1154, 1148, 1654, 945, 123, 1446, 947, 68, 1639, -405, -56,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 495,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000, 1635, 1635,-1000, 1635, 1762,-1000, -1000, -408,-1000, -387,-1000,-1000, -429,-1000,-1000,-1000, 1442,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 123,-1000, -1000,-1000,-1000,-1000, 165, 331, 1193,-1000, 108, 829, 338,-1000, 3052, 374, 955,-1000,-1000,-1000,-1000,-1000, 1217, -53, 1081, 1193,-1000, 535, 1223, 2983,-1000,-1000, -1000, 393,-1000,-1000,-1000, 1111, 1102, 1094, 1075, 1067, 1055, 1054, 1033, 1032, 1031, 997, 996, 995, 399, 987, 975, 68, 455, 1076, -53, -53, 102, 938,-1000,-1000, -1000, 1255, 1255, 1255, 1255,-1000,-1000,-1000,-1000,-1000, -1000, 1255, 1255, 1255,-1000,-1000,-1000,-1000,-1000, -441, 2721, 853, 852, 2983,-1000, 1255, 1193, 1181,-1000, 123, -1000, 123, -23,-1000, 1227,-1000,-1000, 1913, 123, 102, -1000,-1000, 1193,-1000, 1439,-1000,-1000, 1143,-1000,-1000, -287, 998, 1877,-1000,-1000,-1000,-1000, 1299,-1000, -236, -257, 102,-1000,-1000,-1000,-1000, 383, 192, 108, 899, 880,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -434,-1000, -1000, 35,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000, 336,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 102,-1000,-1000,-1000,-1000, 1624, 1299, 1622,-1000,-1000, -1000,-1000,-1000, 359, 1438, 1223,-1000, 128, 1433, 1220, -1000, 2375,-1000,-1000,-1000, -37, 102, 977, 102, 1938, 102, 110, 102, 93, 102, 124, 102, 102, 102, 102, 102, 102, 102, 124, 102, 102, 102, 102, 102, 102, 102, 974, 968, 953, 913, 102, 102, -112, 102, 1432, 1299,-1000,-1000, 1621, 1616, 1430, 1429, 851,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000, 65, -25,-1000, 1214, -1000, 1216,-1000,-1000, -96, 2983,-1000,-1000, 1299,-1000, 1615, 1614, 1613, 1608, 1607, 1605, 18, 1604, 1603, 1602, 1597, 1595, 1590,-1000,-1000,-1000, 411,-1000, 1586, 1426, 1335,-1000,-1000,-1000,-1000, 1425,-1000, 740, 102,-1000, 1275, 102, 102, 950, 
108, 850,-1000,-1000,-1000,-1000, -1000,-1000,-1000, 155, 102, 5, 371,-1000,-1000,-1000, -1000,-1000, 2983, 395,-1000,-1000,-1000, 1172, 1422, 1417, 847, 144, 1416, 1413, 846, 1412, 844, 1408, 1407, 843, 1406, 1404, 842, 1402, 841, 1398, 833, 1396, 831, 1384, 830, 1378, 828, 1377, 823, 1375, 811, 1373, 787, 124, 102, 102, 102, 102, 102, 102, 102, 1372, 780, 1370, 759,-1000, 332, -53, -53,-1000,-1000, 822, 3564, -96, 2983, -53, 969,-1000, 1585, 1584, 1576, 1573, 1142, -53, -1000,-1000,-1000,-1000, 102, 758, 108, 757, 752, 102, 1299,-1000,-1000, 1366, 1133, 1125, 1085, 1365,-1000, 73, -1000, 1068, 735, 101,-1000,-1000,-1000, 1571, 1363,-1000, -1000, 1570,-1000, 1556,-1000,-1000, 1554,-1000,-1000, 1553, -1000, 1552,-1000, 1551,-1000, 1549,-1000, 1542,-1000, 1535, -1000, 1534,-1000, 1533,-1000, 1532, 1362, 723, 1360, 716, 1352, 687, 1347, 677,-1000, 1530,-1000, 1529,-1000, 1343, 1338,-1000, 2721, 969,-1000, 1334, 1528,-1000, 857, 411, 1331, 429,-1000, 1261,-1000, 2042, 1330,-1000, 102, 102, 102,-1000,-1000, 1938,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000, 1526,-1000, 1525,-1000, 1524,-1000, 1522,-1000,-1000, -1000,-1000, -39, 1521, 945, -53,-1000,-1000,-1000, 108, -1000, 947,-1000, 1327, 1324, 1323,-1000, 182, 1106, 2264, 428, 278, 527, 608, 582, 562, 443, 544, 530, 426, -1000,-1000,-1000,-1000, 405, 135, -96, -53,-1000, 1321, 2115, 1203,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, 88,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000, 328, 381, 357, 350,-1000,-1000,-1000, 1520, 1320,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000,-1000, -1000,-1000,-1000,-1000,-1000,-1000,-1000, 1424, 108,-1000, -1000,-1000,-1000,-1000, -53, -443, 102, 1296, 1319, -188, 1316,-1000,-1000, 65,-1000, 3564, 2721, -46, -96, 969, 1369, -53, 1307,-1000 }; YYSTATIC YYCONST short YYFARDATA YYPGO[] = { 0, 33, 178, 5, 1991, 78, 39, 7, 1989, 0, 1988, 1984, 1982, 268, 80, 1981, 1977, 4, 1972, 52, 40, 3, 26, 32, 24, 6, 1970, 44, 41, 45, 1969, 38, 34, 10, 17, 11, 31, 1968, 42, 1967, 35, 18, 1966, 1965, 9, 1, 13, 8, 1954, 1950, 1947, 1946, 22, 27, 43, 1945, 1944, 1943, 1942, 15, 1941, 1940, 12, 1939, 30, 1938, 14, 36, 16, 23, 46, 2, 599, 59, 1236, 29, 106, 1928, 1924, 1921, 1920, 1919, 1918, 19, 28, 1917, 1329, 1916, 1915, 25, 789, 131, 1914, 50, 1221, 1913, 1912, 1911, 1910, 1909, 1901, 1900, 1899, 1898, 1897, 1895, 1892, 1891, 1890, 1028, 1888, 67, 56, 1887, 65, 134, 62, 55, 1885, 1884, 89, 1882, 1881, 1880, 1874, 1869, 1866, 53, 1864, 1863, 1862, 100, 70, 49, 1861, 92, 292, 1859, 1858, 1856, 1855, 1850, 1849, 1843, 1842, 1839, 1838, 1837, 1830, 832, 1829, 1814, 1813, 1812, 1811, 1810, 1803, 1802, 75, 1801, 1800, 125, 1797, 1796, 1795, 130, 1791, 1790, 1783, 1782, 1781, 1779, 1778, 58, 1760, 63, 1777, 54, 1776, 602, 1762, 1761, 1759, 1646, 1615, 1438 }; YYSTATIC YYCONST yyr_t YYFARDATA YYR1[]={ 0, 109, 109, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 136, 136, 36, 36, 133, 133, 133, 2, 2, 1, 1, 1, 9, 24, 24, 23, 23, 23, 134, 134, 134, 134, 134, 135, 135, 135, 135, 135, 135, 135, 135, 135, 93, 93, 93, 93, 94, 94, 94, 94, 10, 11, 73, 72, 72, 59, 61, 61, 61, 62, 62, 62, 65, 65, 132, 132, 132, 60, 60, 60, 60, 60, 60, 130, 130, 130, 119, 12, 12, 12, 12, 12, 12, 118, 137, 113, 138, 139, 111, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 140, 
140, 141, 141, 112, 112, 142, 142, 56, 56, 57, 57, 69, 69, 18, 18, 18, 18, 18, 19, 19, 68, 68, 67, 67, 58, 21, 21, 22, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 116, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 4, 4, 35, 35, 16, 16, 75, 75, 75, 75, 75, 75, 75, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 76, 74, 74, 74, 74, 74, 74, 144, 144, 81, 81, 81, 145, 145, 150, 150, 150, 150, 150, 150, 150, 150, 146, 82, 82, 82, 147, 147, 151, 151, 151, 151, 151, 151, 151, 152, 38, 38, 34, 34, 153, 114, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 3, 3, 3, 13, 13, 13, 13, 13, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 154, 115, 115, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 158, 159, 156, 161, 161, 160, 160, 160, 163, 162, 162, 162, 162, 166, 166, 166, 169, 164, 167, 168, 165, 165, 165, 117, 170, 170, 172, 172, 172, 171, 171, 173, 173, 14, 14, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 175, 31, 31, 32, 32, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 42, 42, 42, 43, 43, 43, 47, 47, 46, 46, 45, 45, 44, 44, 48, 48, 49, 49, 49, 50, 50, 50, 50, 51, 51, 149, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 6, 6, 6, 6, 6, 53, 53, 54, 54, 55, 55, 25, 25, 26, 26, 27, 27, 27, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 5, 5, 71, 71, 71, 71, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 20, 20, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 30, 30, 29, 29, 29, 29, 29, 131, 131, 131, 131, 131, 131, 64, 64, 64, 63, 63, 87, 87, 84, 84, 85, 17, 17, 37, 37, 37, 37, 37, 37, 37, 37, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 176, 176, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 121, 121, 88, 88, 89, 89, 177, 122, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 123, 123, 178, 178, 178, 66, 66, 179, 179, 179, 179, 179, 179, 180, 182, 181, 124, 124, 125, 125, 183, 183, 183, 183, 126, 148, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 127, 127, 184, 184, 184, 184, 184, 184, 184, 128, 128, 92, 92, 92, 129, 129, 185, 185, 185, 185 }; YYSTATIC YYCONST yyr_t YYFARDATA YYR2[]={ 0, 0, 2, 4, 4, 3, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 1, 1, 1, 2, 2, 3, 2, 2, 1, 1, 1, 4, 1, 0, 2, 1, 3, 2, 4, 6, 1, 1, 1, 1, 3, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 2, 3, 2, 2, 2, 1, 1, 2, 1, 2, 4, 6, 3, 5, 7, 9, 3, 4, 7, 1, 1, 1, 2, 0, 2, 2, 0, 6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 3, 1, 2, 3, 7, 0, 2, 2, 2, 2, 2, 3, 3, 2, 1, 4, 3, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 2, 2, 2, 5, 0, 2, 0, 2, 0, 2, 3, 1, 0, 1, 1, 3, 0, 3, 1, 1, 1, 1, 1, 0, 2, 4, 3, 
0, 2, 3, 0, 1, 5, 3, 4, 4, 4, 1, 1, 1, 1, 1, 2, 2, 4, 13, 22, 1, 1, 5, 3, 7, 5, 4, 7, 0, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 2, 2, 2, 5, 0, 2, 0, 2, 0, 3, 9, 9, 7, 7, 1, 1, 1, 2, 2, 1, 4, 0, 1, 1, 2, 2, 2, 2, 1, 4, 2, 5, 3, 2, 2, 1, 4, 3, 0, 2, 2, 0, 2, 2, 2, 2, 2, 1, 1, 1, 1, 9, 0, 2, 2, 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 4, 1, 3, 1, 13, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 8, 6, 5, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 5, 1, 1, 1, 0, 4, 4, 4, 4, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 1, 0, 2, 2, 1, 2, 4, 5, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 4, 6, 4, 4, 11, 1, 5, 3, 7, 5, 5, 3, 1, 2, 2, 1, 2, 4, 4, 1, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 4, 4, 2, 4, 2, 0, 1, 1, 3, 1, 3, 1, 0, 3, 5, 4, 3, 5, 5, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 2, 0, 1, 1, 2, 1, 1, 1, 1, 4, 4, 5, 4, 4, 4, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 7, 0, 2, 2, 0, 2, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 2, 0, 2, 3, 2, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 5, 3, 2, 2, 2, 2, 2, 5, 4, 6, 2, 4, 0, 3, 3, 1, 1, 0, 3, 0, 1, 1, 3, 0, 1, 1, 3, 1, 3, 4, 4, 4, 4, 5, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 4, 1, 0, 10, 6, 5, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 3, 4, 6, 5, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 4, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 5, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 3, 2, 3, 4, 2, 2, 2, 5, 5, 7, 4, 3, 2, 3, 2, 1, 1, 2, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 3, 0, 1, 1, 3, 2, 6, 7, 3, 3, 3, 6, 0, 1, 3, 5, 6, 4, 4, 1, 3, 3, 1, 1, 1, 1, 4, 1, 6, 6, 6, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 5, 4, 7, 6, 7, 6, 9, 8, 3, 8, 4, 0, 2, 0, 1, 3, 3, 0, 2, 2, 2, 3, 2, 2, 2, 2, 2, 0, 2, 3, 1, 1, 1, 1, 3, 8, 2, 3, 1, 1, 3, 3, 3, 4, 6, 0, 2, 3, 1, 3, 1, 4, 3, 0, 2, 2, 2, 3, 3, 3, 3, 3, 3, 0, 2, 2, 3, 3, 4, 2, 1, 1, 3, 5, 0, 2, 2, 0, 2, 4, 3, 1, 1 }; YYSTATIC YYCONST short YYFARDATA YYCHK[]={ -1000,-109,-110,-111,-113,-114,-116,-117,-118,-119, -120,-121,-122,-124,-126,-128,-130,-131,-132, 525, 526, 460, 528, 529,-133,-134,-135, 532, 533,-139, 409,-152, 411,-170,-137, 455,-176, 463, 408, 470, 471, 430, -87, 431, -93, -94, 273, 449, 530, 534, 535, 536, 537, 538, 539, 540, 59,-138, 410, 412, 454, 447, 448, 450, -10, -11, 123, 123,-115, 123, 123, 123, 123, -9, 264, -9, 527, -88, -24, 265, 264, -24, 123,-140, 314, -1, -2, 261, 260, 263, -78, -16, 91,-171, 123,-174, 278, 38,-175, 286, 287, 284, 283, 282, 281, 288, -31, -32, 267, 91, -9, -90, 469, 469, -92, -1, 469, -86, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, -31, -86, 263, -28, -70, -74, -93, -94, 306, 297, 322, 323,-149, 33, 307, 276, 324, -52, 275, 91, -5, -76, 268, 413, 414, 415, 358, 357, 278, 298, 277, 281, 282, 283, 284, 286, 287, 279, 290, 291, 292, 293, 271, -1, 296, -1, -1, -1, -1, 262, -77,-172, 318, 379, 61, -73, 40, -75, -7, -76, 269, 272, 325, 340, -8, 295, 300, 302, 308, -31, -31,-112,-109, 125,-155, 416,-156, 418,-154, 420, 421,-117,-157, -2,-131,-120,-133, -132,-135, 472, 458, 508,-158, 507,-160, 419, -95, -96, -97, -98, -99,-108,-100,-101,-102,-103,-104, -105,-106,-107,-159,-163, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 123, 417, -123,-125,-127,-129, -9, -1, 461,-136, -70, -76, -141, 315, -71, -70, 91, -28,-149, 46, -7, 328, 329, 330, 331, 332, 326, 346, 353, 337, 365, 366, 367, 368, 369, 
370, 371, 351, 378, 294, 372, -79, -9,-173,-174, 42, 40, -31, 40, -14, 91, 40, -14, 40, -14, 40, -14, 40, -14, 40, -14, 40, 41, 267, -9, 263, 58, 44, 262, -1, 354, 355, 356, 473, 379, 475, 476, 477, 478, -90, -91, -1, 329, 330, -1, -71, 41, -36, 61, 288, 262, 44, 390, 91, 38, 42, 359, 360, 361, 60, 390, 390, 390, 390, -70, 306, -70, -75, -7, 33, -9, -1, 280, 279, 289, -28, -1, -76, 42, 471, 47, -28, 270, 272, 281, 282, 283, 284, 40, -36, -1, 329, 330, 322, 345, 334, 335, 337, 338, 339, 340, 341, 342, 343, 344, 362, 355, 336, 352, 326, 371, 294, -2, 40, 61, -72, -71, -74, -28, -7, -7, 40, 301, 303, 304, 305, 41, 41, 125,-143,-114,-111, -144,-146,-116,-117,-131,-120,-132, 452, 453,-148, 508,-133,-135, 507, 321, 422, 427, 472, 408, 125, -9, -9, 40, 451, 58, 91, -9, -71, 357, 364, 541, 91,-161,-162,-164,-166,-167,-168, 311,-169, 309, 313, 312, -9, -2, -9, -24, 40, -23, -24, 266, 286, 287, -31, -9, -2, -75, -28, -76, 270, 272, -71, -36, 341,-175, -7, -72, 40,-115,-158, -2, -9, 125,-178, 462,-131,-179,-180, 467, 468, -181,-132,-135, 464, 125,-183,-177,-179,-182, 338, 462, 465, 125,-184, 460, 408, 463, 296,-132,-135, 125,-185, 460, 463,-132,-135, -89, 420, 125,-136, -142, -71, -1, 471, -7, -1, -13, 40, 40, -28, 328, 329, 330, 331, 377, 371, 326, 479, 365, 366, 367, 368, 375, 376, 294, 93, 125, 44, 40, -2, 41, -23, -9, -23, -24, -9, -9, -9, 93, -9, -9, 474, -1, -1, 330, 329, 327, 336, 390, 40, 61, 43, 123, 40, 40, 263, -1, 93, -30, -29, 275, -9, 40, 40, -54, -55, -28, -1, -1, -1, -1, -70, -28, -9, -1, 280, 93, 93, 93, -1, -1, -71, -1, 91, -9, -69, 60, 329, 330, 331, 365, 366, 367, 40, 61, -36, 123, 40, 41, -71, -3, 373, 374, -1, -9,-115, 123, 123, 123, -9, -9, 123, -71, 357, 364, 541, 364, -81, -82, -91, -25, -26, -27, 275, -13, 40, -9, 58, 274, -7, 91, -1, 91, -1, -9,-161,-165,-158, 310,-165, -165,-165, -71,-158, -2, -9, 40, 40, 41, -71, -1, 40, -31, -28, -6, -2, -9, 125, 316, 316, 466, -31, -66, -9, 42, -36, 61, -31, 61, -31, -31, 61, 61, -1, 469, -9, 469, 40, -1, 469, -177, 44, 93, -1, -28, -28, 91, -9, -36, -83, -1, 40, 40,-173, -36, 41, 41, 93, 41, 41, 41, 41, 41, -12, 263, 44, 58, 390, 329, 330, 331, 365, 366, 367, -1, -84, -85, -36, 123, 262, -64, -63, -71, 306, 44, 93, 44, 275, -71, -71, 62, 44, 42, -5, -5, -5, 93, 274, 41, -68, -19, -18, 43, 45, 306, 323, 373, -9, -59, -61, -73, 274, -53, -22, 60, 41, 125,-112,-145,-147, -127, 274, -7, 91, -1, 91, -1, -71, -71, -1, 371, 326, -7, 371, 326, -1, 41, 44, -28, -25, 93, -9, -3, -1, -28, -9, -9, 44, 93, -2, -9, -9, -24, 274, -36, 41, 40, 41, 44, 44, -2, -9, -9, 41, 58, 40, 41, 40, 41, 41, 40, 40, -5, -1, -9, 317, -1, -31, -71, 93, -38, 479, 504, 505, 506, -9, 41, 390, -83, 41, 387, 341, 342, 343, 388, 389, 301, 303, 304, 305, 391, 394, 294, -4, 317, -34, -33,-153, 480, 482, 483, 484, 485, 276, 277, 281, 282, 283, 284, 286, 287, 257, 279, 290, 291, 292, 293, 486, 487, 488, 490, 491, 492, 493, 494, 495, 496, 334, 497, 280, 289, 336, 498, 341, 489, 357, 390, 502, 271, 123, -9, 41, -14, -14, -14, -14, -14, -14, 317, 283, 284, 456, 457, 459, -9, -9, -1, 41, 44, 61, -59, 125, 44, 61, 263, 263, -29, -9, 41, 41, -28, 40, -5, -1, 62, -58, -1, 40, -19, 41, 125, -62, -40,-135, -41, 298, 364, 297, 286, 287, 284, 283, 282, 281, 293, 292, 291, 290, 279, 278, 277,-175, 61, -3, 40, 40, 91, -54, 125, 125, -150, 423, 424, 425, 426,-120,-132,-133,-135, 125, -151, 428, 429, 426,-132,-120,-133,-135, 125, -3, -28, -9, -9, 44, -93, 450, -1, -28, -27, -38, 41, 390, -71, 93, 93, -71, -35, 61, 316, 316, 41, 41, -1, 41, -25, -6, -6, -66, 41, -9, 41, -3, 40, 93, 93, 93, 
93, -36, 41, 58, 58, 40, -35, -2, 41, 42, 91, -32, 40, 481, 501, 277, 281, 282, 283, 284, 280, -20, 40, -20, -20, -15, 510, 483, 484, 276, 277, 281, 282, 283, 284, 286, 287, 279, 290, 291, 292, 293, 42, 486, 487, 488, 490, 491, 494, 495, 497, 280, 289, 257, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 496, 488, 500, 41, -2, 263, 263, 44, -84, -37, -17, -9, 283, -36, -70, 319, 320, 125, -64, 123, 61, -25, -1, -67, 44, -56, -57, -71, -65,-135, 358, 363, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 40, 91, 284, 283, 282, 281, 40, 91, 40, 91, -31, -36, 123, 40, -53, -22, -25, -25, -9, 62, -75, -75, -75, -75, -75, -75, -75, 509, -71, 93, 93, -71, -1, -2, -2, 274, 44, -39, -41, -36, 299, 286, 287, 284, 283, 282, 281, 279, 293, 292, 291, 290, 278, 277, -2, -9, 41, 58, -89, -69, -34, -83, 392, 393, 392, 393, -9, 93, -9, 43, 125, -36, 91, 91, 503, 44, 91, 524, 38, 281, 282, 283, 284, 280, -9, 40, 40, -62, 123, 41, -67, -68, 41, 44, -60, -52, 364, 297, 345, 299, 263, -9, 306, -70, 299, -9, -40, -9, -23, -9, -9, -23, -24, -9, -24, -9, -9, -9, -9, -9, -9, -9, -24, -9, -9, -9, -9, -9, -9, -9, 40, 91, 40, 91, 40, 91, 40, 91, -9, -9, -17, -9, 41, -59, 40, 40, 41, 41, 93, -7, 274, 44, 40, -3, -71, 284, 283, 282, 281, -66, 40, 41, 41, 41, 93, 43, -9, 44, -9, -9, 61, -36, 93, 263, -9, 281, 282, 283, -9, 125, -62, -71, -1, 91, 306, -70, 41, 41, 93, 263, 41, 41, 93, 41, 93, 41, 41, 93, 41, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, 41, 93, -24, -9, -9, -9, -9, -9, -9, -9, 41, 93, 41, 93, 125, -25, -25, 62, -28, -3, -71, -25, -21, -22, 60, 58, -25, -9, 93, -36, 93, 93, -9, 41, 58, 58, 58, 41, 125, 61, 93, 263, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 93, 41, 93, 41, 93, 41, 93, 40, 40, 41, 41, -71, -21, 41, 40, -66, 41, 93, 44, 41, -33, 41, -9, -9, -9, -40, -49, -50, -51, -42, -43, -47, -46, -45, -44, -47, -46, -45, -44, 40, 40, 40, 40, -45, -48, 274, 40, -35, -25, -80, -36, 41, 41, 41, 41, 299, 263, 41, 299, 306, -70, 41, -40, 41, -23, -9, 41, -23, -24, 41, -24, 41, -9, 41, -9, 41, -9, 41, 41, 41, 41, -47, -46, -45, -44, 41, 41, -17, -3, -25, 41, 123, 324, 379, 380, 381, 308, 382, 383, 384, 385, 333, 347, 348, 349, 350, 294, 44, 263, 41, 41, 41, 41, 40, 41, 40, -36, -25, 509, -9, 41, 41, 357, 41, -7, -28, -71, 274, -3, -21, 40, -25, 41 }; YYSTATIC YYCONST short YYFARDATA YYDEF[]={ 1, -2, 2, 0, 0, 333, 6, 7, 8, 9, 10, 11, 0, 0, 0, 0, 16, 17, 18, 0, 0, 772, 0, 0, 24, 25, 26, 0, 28, 135, 0, 269, 206, 0, 431, 0, 0, 778, 105, 835, 92, 0, 431, 0, 83, 84, 85, 0, 0, 0, 0, 0, 0, 57, 58, 0, 60, 108, 262, 387, 0, 757, 758, 219, 431, 431, 139, 1, 0, 788, 806, 824, 838, 19, 41, 20, 0, 0, 22, 42, 43, 23, 29, 137, 0, 104, 38, 39, 36, 37, 219, 186, 0, 384, 0, 391, 0, 0, 431, 394, 394, 394, 394, 394, 394, 0, 0, 432, 433, 0, 760, 0, 778, 814, 0, 93, 0, 0, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 0, 0, 33, 0, 0, 0, 0, 0, 0, 668, 0, 0, 219, 0, 684, 685, 0, 689, 0, 0, 549, 233, 551, 552, 553, 554, 0, 489, 691, 692, 693, 694, 695, 696, 697, 698, 699, 0, 704, 705, 706, 707, 708, 555, 0, 52, 54, 55, 56, 59, 0, 386, 388, 389, 0, 61, 0, 71, 0, 212, 213, 214, 219, 219, 217, 0, 220, 221, 226, 0, 0, 0, 0, 5, 334, 0, 336, 0, 0, 340, 341, 342, 343, 0, 345, 346, 347, 348, 349, 0, 0, 0, 355, 0, 0, 332, 504, 0, 0, 0, 0, 431, 0, 219, 0, 0, 0, 219, 0, 0, 333, 0, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 362, 369, 0, 0, 0, 0, 21, 774, 773, 0, 29, 550, 107, 0, 136, 557, 0, 560, 
219, 0, 311, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 0, 0, 0, 0, 0, 393, 0, 0, 0, 0, 405, 0, 0, 406, 0, 407, 0, 408, 0, 409, 0, 410, 430, 102, 434, 0, 759, 0, 0, 769, 777, 779, 780, 781, 0, 783, 784, 785, 786, 787, 0, 0, 833, 836, 837, 94, 718, 719, 720, 0, 0, 31, 0, 0, 711, 673, 674, 675, 0, 0, 534, 0, 0, 0, 0, 667, 0, 670, 228, 0, 0, 681, 683, 686, 0, 688, 690, 0, 0, 0, 0, 0, 0, 231, 232, 700, 701, 702, 703, 0, 53, 147, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 0, 131, 132, 133, 0, 0, 103, 0, 0, 72, 73, 0, 215, 216, 0, 222, 223, 224, 225, 64, 68, 3, 140, 333, 0, 0, 0, 168, 169, 170, 171, 172, 0, 0, 0, 0, 178, 179, 0, 0, 236, 250, 814, 105, 4, 335, 337, -2, 0, 344, 0, 0, 0, 219, 0, 0, 0, 363, 365, 0, 0, 0, 0, 0, 0, 379, 380, 377, 505, 506, 507, 508, 503, 509, 510, 44, 0, 0, 0, 512, 513, 514, 0, 517, 518, 519, 520, 521, 0, 431, 0, 525, 527, 0, 366, 0, 0, 12, 789, 0, 791, 792, 431, 0, 0, 431, 799, 800, 0, 13, 807, 431, 809, 431, 811, 0, 0, 14, 825, 0, 0, 0, 0, 831, 832, 15, 839, 0, 0, 842, 843, 771, 775, 27, 30, 138, 142, 0, 0, 0, 40, 0, 0, 292, 0, 187, 188, 189, 190, 191, 192, 193, 0, 195, 196, 197, 198, 199, 200, 0, 207, 390, 0, 0, 0, 398, 0, 0, 0, 0, 0, 0, 0, 96, 762, 0, 782, 804, 812, 815, 816, 817, 0, 0, 0, 0, 0, 722, 727, 728, 34, 47, 671, 0, 709, 712, 713, 0, 0, 0, 535, 536, 48, 49, 50, 51, 669, 0, 680, 682, 687, 0, 0, 0, 0, 556, 0, -2, 711, 0, 106, 154, 125, 126, 127, 128, 129, 130, 0, 385, 62, 75, 69, 219, 0, 532, 308, 309, -2, 0, 0, 139, 239, 253, 173, 174, 824, 0, 219, 0, 0, 0, 0, 219, 0, 0, 539, 540, 542, 0, -2, 0, 0, 0, 0, 0, 357, 0, 0, 0, 364, 370, 381, 0, 371, 372, 373, 378, 374, 375, 376, 0, 0, 511, 0, -2, 0, 0, 0, 0, 530, 531, 361, 0, 0, 0, 0, 0, 793, 794, 797, 0, 0, 0, 0, 0, 0, 0, 826, 0, 830, 0, 0, 0, 0, 431, 0, 558, 0, 0, 263, 0, 0, 292, 0, 202, 561, 0, 392, 0, 397, 394, 395, 394, 394, 394, 394, 394, 0, 761, 0, 0, 0, 818, 819, 820, 821, 822, 823, 834, 0, 729, 0, 75, 32, 0, 723, 0, 0, 0, 672, 711, 715, 0, 0, 679, 0, 674, 545, 546, 547, 0, 0, 227, 0, 0, 154, 149, 150, 151, 152, 153, 0, 0, 78, 65, 0, 0, 0, 534, 218, 164, 0, 0, 0, 0, 0, 0, 0, 181, 0, 0, 0, 0, -2, 237, 238, 0, 251, 252, 813, 338, 311, 263, 0, 350, 352, 353, 310, 0, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 523, -2, 526, 527, 527, 367, 368, 790, 795, 0, 803, 798, 801, 808, 810, 776, 802, 827, 828, 0, 0, 841, 0, 141, 559, 0, 0, 0, 0, 0, 0, 288, 0, 0, 291, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 0, 0, 0, 204, 0, 0, 265, 0, 0, 0, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 0, 582, 583, 584, 585, 591, 592, 593, 594, 595, 596, 597, 616, 616, 600, 616, 618, 604, 606, 0, 608, 0, 610, 612, 0, 614, 615, 267, 0, 396, 399, 400, 401, 402, 403, 404, 0, 97, 98, 99, 100, 101, 764, 766, 805, 716, 0, 0, 0, 721, 722, 0, 37, 35, 710, 714, 676, 677, 537, -2, 548, 229, 148, 0, 158, 143, 155, 134, 63, 74, 76, 77, 438, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 431, 0, 532, -2, -2, 0, 0, 165, 166, 240, 219, 219, 219, 219, 245, 246, 247, 248, 167, 254, 219, 219, 219, 258, 259, 260, 261, 175, 0, 0, 0, 0, 0, 184, 219, 234, 0, 541, 543, 339, 0, 0, 356, 0, 359, 360, 0, 0, 0, 45, 46, 515, 522, 0, 528, 529, 0, 829, 840, 774, 147, 561, 312, 313, 314, 315, 292, 290, 0, 0, 0, 185, 203, 194, 586, 0, 0, 0, 0, 0, 611, 578, 579, 580, 581, 605, 598, 0, 599, 601, 602, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 0, 634, 635, 636, 637, 638, 642, 643, 644, 645, 646, 647, 648, 649, 650, 652, 653, 654, 655, 656, 657, 658, 659, 660, 
661, 662, 663, 664, 665, 666, 607, 609, 613, 201, 95, 763, 765, 0, 730, 731, 734, 735, 0, 737, 0, 732, 733, 717, 724, 78, 0, 0, 158, 157, 154, 0, 144, 145, 0, 80, 81, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66, 75, 70, 0, 0, 0, 0, 0, 533, 241, 242, 243, 244, 255, 256, 257, 219, 0, 180, 0, 183, 0, 544, 351, 0, 0, 205, 435, 436, 437, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 382, 383, 524, 0, 770, 0, 0, 0, 303, 304, 305, 306, 0, 587, 0, 0, 266, 0, 0, 0, 0, 0, 0, 640, 641, 630, 631, 632, 633, 651, 768, 0, 0, 0, 78, 678, 156, 159, 160, 0, 0, 86, 87, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 429, 0, -2, -2, 210, 211, 0, 0, 0, 0, -2, 161, 358, 0, 0, 0, 0, 0, -2, 264, 289, 307, 588, 0, 0, 0, 0, 0, 0, 603, 639, 767, 0, 0, 0, 0, 0, 725, 0, 146, 0, 0, 0, 90, 439, 440, 0, 0, 442, 443, 0, 444, 0, 411, 413, 0, 412, 414, 0, 415, 0, 416, 0, 417, 0, 418, 0, 423, 0, 424, 0, 425, 0, 426, 0, 0, 0, 0, 0, 0, 0, 0, 0, 427, 0, 428, 0, 67, 0, 0, 163, 0, 161, 182, 0, 0, 162, 0, 0, 0, 0, 590, 0, 564, 561, 0, 736, 0, 0, 0, 741, 726, 0, 91, 89, 480, 441, 483, 487, 464, 467, 470, 472, 474, 476, 470, 472, 474, 476, 419, 0, 420, 0, 421, 0, 422, 0, 474, 478, 208, 209, 0, 0, 204, -2, 796, 316, 589, 0, 563, 565, 617, 0, 0, 0, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 470, 472, 474, 476, 0, 0, 0, -2, 249, 0, 0, 0, 738, 739, 740, 461, 481, 482, 462, 484, 0, 486, 463, 488, 445, 465, 466, 446, 468, 469, 447, 471, 448, 473, 449, 475, 450, 477, 451, 452, 453, 454, 0, 0, 0, 0, 459, 460, 479, 0, 0, 354, 268, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 0, 0, 485, 455, 456, 457, 458, -2, 0, 0, 0, 0, 0, 0, 562, 176, 219, 331, 0, 0, 0, 0, 161, 0, -2, 0, 177 }; #ifdef YYRECOVER YYSTATIC YYCONST short yyrecover[] = { -1000 }; #endif /* SCCSWHAT( "@(#)yypars.c 3.1 88/11/16 22:00:49 " ) */ #line 3 "F:\\NetFXDev1\\src\\tools\\devdiv\\amd64\\yypars.c" #if ! defined(YYAPI_PACKAGE) /* ** YYAPI_TOKENNAME : name used for return value of yylex ** YYAPI_TOKENTYPE : type of the token ** YYAPI_TOKENEME(t) : the value of the token that the parser should see ** YYAPI_TOKENNONE : the representation when there is no token ** YYAPI_VALUENAME : the name of the value of the token ** YYAPI_VALUETYPE : the type of the value of the token (if null, then the value is derivable from the token itself) ** YYAPI_VALUEOF(v) : how to get the value of the token. */ #define YYAPI_TOKENNAME yychar #define YYAPI_TOKENTYPE int #define YYAPI_TOKENEME(t) (t) #define YYAPI_TOKENNONE -1 #define YYAPI_TOKENSTR(t) (sprintf_s(yytokbuf, ARRAY_SIZE(yytokbuf), "%d", t), yytokbuf) #define YYAPI_VALUENAME yylval #define YYAPI_VALUETYPE YYSTYPE #define YYAPI_VALUEOF(v) (v) #endif #if ! 
defined(YYAPI_CALLAFTERYYLEX) #define YYAPI_CALLAFTERYYLEX #endif # define YYFLAG -1000 # define YYERROR goto yyerrlab # define YYACCEPT return(0) # define YYABORT return(1) #ifdef YYDEBUG /* RRR - 10/9/85 */ char yytokbuf[20]; # ifndef YYDBFLG # define YYDBFLG (yydebug) # endif # define yyprintf(a, b, c, d) if (YYDBFLG) YYPRINT(a, b, c, d) #else # define yyprintf(a, b, c, d) #endif #ifndef YYPRINT #define YYPRINT printf #endif /* parser for yacc output */ #ifdef YYDUMP int yydump = 1; /* 1 for dumping */ void yydumpinfo(void); #endif #ifdef YYDEBUG YYSTATIC int yydebug = 0; /* 1 for debugging */ #endif YYSTATIC YYSTYPE yyv[YYMAXDEPTH]; /* where the values are stored */ YYSTATIC short yys[YYMAXDEPTH]; /* the parse stack */ #if ! defined(YYRECURSIVE) YYSTATIC YYAPI_TOKENTYPE YYAPI_TOKENNAME = YYAPI_TOKENNONE; #if defined(YYAPI_VALUETYPE) // YYSTATIC YYAPI_VALUETYPE YYAPI_VALUENAME; FIX #endif YYSTATIC int yynerrs = 0; /* number of errors */ YYSTATIC short yyerrflag = 0; /* error recovery flag */ #endif #ifdef YYRECOVER /* ** yyscpy : copy f onto t and return a ptr to the null terminator at the ** end of t. */ YYSTATIC char *yyscpy(register char*t, register char*f) { while(*t = *f++) t++; return(t); /* ptr to the null char */ } #endif #ifndef YYNEAR #define YYNEAR #endif #ifndef YYPASCAL #define YYPASCAL #endif #ifndef YYLOCAL #define YYLOCAL #endif #if ! defined YYPARSER #define YYPARSER yyparse #endif #if ! defined YYLEX #define YYLEX yylex #endif #if defined(YYRECURSIVE) YYSTATIC YYAPI_TOKENTYPE YYAPI_TOKENNAME = YYAPI_TOKENNONE; #if defined(YYAPI_VALUETYPE) YYSTATIC YYAPI_VALUETYPE YYAPI_VALUENAME; #endif YYSTATIC int yynerrs = 0; /* number of errors */ YYSTATIC short yyerrflag = 0; /* error recovery flag */ YYSTATIC short yyn; YYSTATIC short yystate = 0; YYSTATIC short *yyps= &yys[-1]; YYSTATIC YYSTYPE *yypv= &yyv[-1]; YYSTATIC short yyj; YYSTATIC short yym; #endif #pragma warning(disable:102) YYLOCAL YYNEAR YYPASCAL YYPARSER() { #if ! defined(YYRECURSIVE) register short yyn; short yystate, *yyps; YYSTYPE *yypv; short yyj, yym; YYAPI_TOKENNAME = YYAPI_TOKENNONE; yystate = 0; #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:6200) // Index '-1' is out of valid index range...for non-stack buffer... 
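/* Editorial note (added; not part of the generated yypars.c skeleton):
** yyps and yypv are intentionally initialized one element *before* yys[]
** and yyv[], because every push in yystack pre-increments the pointer
** before storing, conceptually:
**
**     ++yyps; *yyps = yystate;   // first push lands on yys[0]
**
** Neither pointer is dereferenced until after that first increment, so the
** PREFAST warning is suppressed here rather than the initialization being
** changed. */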
#endif yyps= &yys[-1]; yypv= &yyv[-1]; #ifdef _PREFAST_ #pragma warning(pop) #endif #endif #ifdef YYDUMP yydumpinfo(); #endif yystack: /* put a state and value onto the stack */ #ifdef YYDEBUG if(YYAPI_TOKENNAME == YYAPI_TOKENNONE) { yyprintf( "state %d, token # '%d'\n", yystate, -1, 0 ); } else { yyprintf( "state %d, token # '%s'\n", yystate, YYAPI_TOKENSTR(YYAPI_TOKENNAME), 0 ); } #endif if( ++yyps > &yys[YYMAXDEPTH] ) { yyerror( "yacc stack overflow" ); return(1); } *yyps = yystate; ++yypv; *yypv = yyval; yynewstate: yyn = YYPACT[yystate]; if( yyn <= YYFLAG ) { /* simple state, no lookahead */ goto yydefault; } if( YYAPI_TOKENNAME == YYAPI_TOKENNONE ) { /* need a lookahead */ YYAPI_TOKENNAME = YYLEX(); YYAPI_CALLAFTERYYLEX(YYAPI_TOKENNAME); } if( ((yyn += YYAPI_TOKENEME(YYAPI_TOKENNAME)) < 0) || (yyn >= YYLAST) ) { goto yydefault; } if( YYCHK[ yyn = YYACT[ yyn ] ] == YYAPI_TOKENEME(YYAPI_TOKENNAME) ) { /* valid shift */ yyval = YYAPI_VALUEOF(YYAPI_VALUENAME); yystate = yyn; yyprintf( "SHIFT: saw token '%s', now in state %4d\n", YYAPI_TOKENSTR(YYAPI_TOKENNAME), yystate, 0 ); YYAPI_TOKENNAME = YYAPI_TOKENNONE; if( yyerrflag > 0 ) { --yyerrflag; } goto yystack; } yydefault: /* default state action */ if( (yyn = YYDEF[yystate]) == -2 ) { register YYCONST short *yyxi; if( YYAPI_TOKENNAME == YYAPI_TOKENNONE ) { YYAPI_TOKENNAME = YYLEX(); YYAPI_CALLAFTERYYLEX(YYAPI_TOKENNAME); yyprintf("LOOKAHEAD: token '%s'\n", YYAPI_TOKENSTR(YYAPI_TOKENNAME), 0, 0); } /* ** search exception table, we find a -1 followed by the current state. ** if we find one, we'll look through terminal,state pairs. if we find ** a terminal which matches the current one, we have a match. ** the exception table is when we have a reduce on a terminal. */ #if YYOPTTIME yyxi = yyexca + yyexcaind[yystate]; while(( *yyxi != YYAPI_TOKENEME(YYAPI_TOKENNAME) ) && ( *yyxi >= 0 )){ yyxi += 2; } #else for(yyxi = yyexca; (*yyxi != (-1)) || (yyxi[1] != yystate); yyxi += 2 ) { ; /* VOID */ } while( *(yyxi += 2) >= 0 ){ if( *yyxi == YYAPI_TOKENEME(YYAPI_TOKENNAME) ) { break; } } #endif if( (yyn = yyxi[1]) < 0 ) { return(0); /* accept */ } } if( yyn == 0 ){ /* error */ /* error ... attempt to resume parsing */ switch( yyerrflag ){ case 0: /* brand new error */ #ifdef YYRECOVER { register int i,j; for(i = 0; (yyrecover[i] != -1000) && (yystate > yyrecover[i]); i += 3 ) { ; } if(yystate == yyrecover[i]) { yyprintf("recovered, from state %d to state %d on token # %d\n", yystate,yyrecover[i+2],yyrecover[i+1] ); j = yyrecover[i + 1]; if(j < 0) { /* ** here we have one of the injection set, so we're not quite ** sure that the next valid thing will be a shift. so we'll ** count it as an error and continue. ** actually we're not absolutely sure that the next token ** we were supposed to get is the one when j > 0. for example, ** for(+) {;} error recovery with yyerrflag always set, stops ** after inserting one ; before the +. at the point of the +, ** we're pretty sure the caller wants a 'for' loop. without ** setting the flag, when we're almost absolutely sure, we'll ** give them one, since the only thing we can shift on this ** error is after finding an expression followed by a + */ yyerrflag++; j = -j; } if(yyerrflag <= 1) { /* only on first insertion */ yyrecerr(YYAPI_TOKENNAME, j); /* what was, what should be first */ } yyval = yyeval(j); yystate = yyrecover[i + 2]; goto yystack; } } #endif yyerror("syntax error"); yyerrlab: ++yynerrs; FALLTHROUGH; case 1: case 2: /* incompletely recovered error ... 
try again */ yyerrflag = 3; /* find a state where "error" is a legal shift action */ while ( yyps >= yys ) { yyn = YYPACT[*yyps] + YYERRCODE; if( yyn>= 0 && yyn < YYLAST && YYCHK[YYACT[yyn]] == YYERRCODE ){ yystate = YYACT[yyn]; /* simulate a shift of "error" */ yyprintf( "SHIFT 'error': now in state %4d\n", yystate, 0, 0 ); goto yystack; } yyn = YYPACT[*yyps]; /* the current yyps has no shift on "error", pop stack */ yyprintf( "error recovery pops state %4d, uncovers %4d\n", *yyps, yyps[-1], 0 ); --yyps; --yypv; } /* there is no state on the stack with an error shift ... abort */ yyabort: return(1); case 3: /* no shift yet; clobber input char */ yyprintf( "error recovery discards token '%s'\n", YYAPI_TOKENSTR(YYAPI_TOKENNAME), 0, 0 ); if( YYAPI_TOKENEME(YYAPI_TOKENNAME) == 0 ) goto yyabort; /* don't discard EOF, quit */ YYAPI_TOKENNAME = YYAPI_TOKENNONE; goto yynewstate; /* try again in the same state */ } } /* reduction by production yyn */ yyreduce: { register YYSTYPE *yypvt; yypvt = yypv; yyps -= YYR2[yyn]; yypv -= YYR2[yyn]; yyval = yypv[1]; yyprintf("REDUCE: rule %4d, popped %2d tokens, uncovered state %4d, ",yyn, YYR2[yyn], *yyps); yym = yyn; yyn = YYR1[yyn]; /* consult goto table to find next state */ yyj = YYPGO[yyn] + *yyps + 1; if( (yyj >= YYLAST) || (YYCHK[ yystate = YYACT[yyj] ] != -yyn) ) { yystate = YYACT[YYPGO[yyn]]; } yyprintf("goto state %4d\n", yystate, 0, 0); #ifdef YYDUMP yydumpinfo(); #endif switch(yym){ case 3: #line 194 "asmparse.y" { PASM->EndClass(); } break; case 4: #line 195 "asmparse.y" { PASM->EndNameSpace(); } break; case 5: #line 196 "asmparse.y" { if(PASM->m_pCurMethod->m_ulLines[1] ==0) { PASM->m_pCurMethod->m_ulLines[1] = PASM->m_ulCurLine; PASM->m_pCurMethod->m_ulColumns[1]=PASM->m_ulCurColumn;} PASM->EndMethod(); } break; case 12: #line 206 "asmparse.y" { PASMM->EndAssembly(); } break; case 13: #line 207 "asmparse.y" { PASMM->EndAssembly(); } break; case 14: #line 208 "asmparse.y" { PASMM->EndComType(); } break; case 15: #line 209 "asmparse.y" { PASMM->EndManifestRes(); } break; case 19: #line 213 "asmparse.y" { #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable:22011) // Suppress PREFast warning about integer overflow/underflow #endif PASM->m_dwSubsystem = yypvt[-0].int32; #ifdef _PREFAST_ #pragma warning(pop) #endif } break; case 20: #line 223 "asmparse.y" { PASM->m_dwComImageFlags = yypvt[-0].int32; } break; case 21: #line 224 "asmparse.y" { PASM->m_dwFileAlignment = yypvt[-0].int32; if((yypvt[-0].int32 & (yypvt[-0].int32 - 1))||(yypvt[-0].int32 < 0x200)||(yypvt[-0].int32 > 0x10000)) PASM->report->error("Invalid file alignment, must be power of 2 from 0x200 to 0x10000\n");} break; case 22: #line 227 "asmparse.y" { PASM->m_stBaseAddress = (ULONGLONG)(*(yypvt[-0].int64)); delete yypvt[-0].int64; if(PASM->m_stBaseAddress & 0xFFFF) PASM->report->error("Invalid image base, must be 0x10000-aligned\n");} break; case 23: #line 230 "asmparse.y" { PASM->m_stSizeOfStackReserve = (size_t)(*(yypvt[-0].int64)); delete yypvt[-0].int64; } break; case 28: #line 235 "asmparse.y" { PASM->m_fIsMscorlib = TRUE; } break; case 31: #line 242 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 32: #line 243 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 33: #line 246 "asmparse.y" { LPCSTRToGuid(yypvt[-0].string,&(PASM->m_guidLang)); } break; case 34: #line 247 "asmparse.y" { LPCSTRToGuid(yypvt[-2].string,&(PASM->m_guidLang));
LPCSTRToGuid(yypvt[-0].string,&(PASM->m_guidLangVendor));} break; case 35: #line 249 "asmparse.y" { LPCSTRToGuid(yypvt[-4].string,&(PASM->m_guidLang)); LPCSTRToGuid(yypvt[-2].string,&(PASM->m_guidLangVendor)); LPCSTRToGuid(yypvt[-0].string,&(PASM->m_guidDoc));} break; case 36: #line 254 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 37: #line 255 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 38: #line 258 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 39: #line 259 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 40: #line 260 "asmparse.y" { yyval.string = newStringWDel(yypvt[-2].string, '.', yypvt[-0].string); } break; case 41: #line 263 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 42: #line 266 "asmparse.y" { yyval.int64 = yypvt[-0].int64; } break; case 43: #line 267 "asmparse.y" { yyval.int64 = neg ? new __int64(yypvt[-0].int32) : new __int64((unsigned)yypvt[-0].int32); } break; case 44: #line 270 "asmparse.y" { yyval.float64 = yypvt[-0].float64; } break; case 45: #line 271 "asmparse.y" { float f; *((__int32*) (&f)) = yypvt[-1].int32; yyval.float64 = new double(f); } break; case 46: #line 272 "asmparse.y" { yyval.float64 = (double*) yypvt[-1].int64; } break; case 47: #line 276 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].binstr,yypvt[-0].string); } break; case 48: #line 277 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].token,yypvt[-0].string); } break; case 49: #line 278 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].token,yypvt[-0].string); } break; case 50: #line 279 "asmparse.y" { yypvt[-2].cad->tkOwner = 0; PASM->AddTypeDef(yypvt[-2].cad,yypvt[-0].string); } break; case 51: #line 280 "asmparse.y" { PASM->AddTypeDef(yypvt[-2].cad,yypvt[-0].string); } break; case 52: #line 285 "asmparse.y" { DefineVar(yypvt[-0].string, NULL); } break; case 53: #line 286 "asmparse.y" { DefineVar(yypvt[-1].string, yypvt[-0].binstr); } break; case 54: #line 287 "asmparse.y" { UndefVar(yypvt[-0].string); } break; case 55: #line 288 "asmparse.y" { SkipToken = !IsVarDefined(yypvt[-0].string); IfEndif++; } break; case 56: #line 291 "asmparse.y" { SkipToken = IsVarDefined(yypvt[-0].string); IfEndif++; } break; case 57: #line 294 "asmparse.y" { if(IfEndif == 1) SkipToken = !SkipToken;} break; case 58: #line 295 "asmparse.y" { if(IfEndif == 0) PASM->report->error("Unmatched #endif\n"); else IfEndif--; } break; case 59: #line 299 "asmparse.y" { _ASSERTE(!"yylex should have dealt with this"); } break; case 60: #line 300 "asmparse.y" { } break; case 61: #line 304 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-0].token, NULL); } break; case 62: #line 305 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-2].token, yypvt[-0].binstr); } break; case 63: #line 306 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-4].token, yypvt[-1].binstr); } break; case 64: #line 307 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-2].int32, yypvt[-1].binstr); } break; case 65: #line 310 "asmparse.y" { yyval.cad = new CustomDescr(yypvt[-2].token, yypvt[-0].token, NULL); } break; case 66: #line 311 "asmparse.y" { yyval.cad = new CustomDescr(yypvt[-4].token, yypvt[-2].token, yypvt[-0].binstr); } break; case 67: #line 313 "asmparse.y" { yyval.cad = new CustomDescr(yypvt[-6].token, yypvt[-4].token, yypvt[-1].binstr); } break; case 68: #line 314 "asmparse.y" { yyval.cad = new CustomDescr(PASM->m_tkCurrentCVOwner, yypvt[-2].int32, yypvt[-1].binstr); } break; case 69: #line
317 "asmparse.y" { yyval.int32 = yypvt[-2].token; bParsingByteArray = TRUE; } break; case 70: #line 321 "asmparse.y" { PASM->m_pCustomDescrList = NULL; PASM->m_tkCurrentCVOwner = yypvt[-4].token; yyval.int32 = yypvt[-2].token; bParsingByteArray = TRUE; } break; case 71: #line 326 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 72: #line 329 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 73: #line 330 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 74: #line 334 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt16(VAL16(nCustomBlobNVPairs)); yyval.binstr->append(yypvt[-0].binstr); nCustomBlobNVPairs = 0; } break; case 75: #line 340 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt16(VAL16(0x0001)); } break; case 76: #line 341 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; AppendFieldToCustomBlob(yyval.binstr,yypvt[-0].binstr); } break; case 77: #line 343 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 78: #line 346 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 79: #line 348 "asmparse.y" { yyval.binstr = yypvt[-5].binstr; yyval.binstr->appendInt8(yypvt[-4].int32); yyval.binstr->append(yypvt[-3].binstr); AppendStringWithLength(yyval.binstr,yypvt[-2].string); AppendFieldToCustomBlob(yyval.binstr,yypvt[-0].binstr); nCustomBlobNVPairs++; } break; case 80: #line 353 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 81: #line 356 "asmparse.y" { yyval.int32 = SERIALIZATION_TYPE_FIELD; } break; case 82: #line 357 "asmparse.y" { yyval.int32 = SERIALIZATION_TYPE_PROPERTY; } break; case 83: #line 360 "asmparse.y" { if(yypvt[-0].cad->tkOwner && !yypvt[-0].cad->tkInterfacePair) PASM->DefineCV(yypvt[-0].cad); else if(PASM->m_pCustomDescrList) PASM->m_pCustomDescrList->PUSH(yypvt[-0].cad); } break; case 84: #line 364 "asmparse.y" { PASM->DefineCV(yypvt[-0].cad); } break; case 85: #line 365 "asmparse.y" { CustomDescr* pNew = new CustomDescr(yypvt[-0].tdd->m_pCA); if(pNew->tkOwner == 0) pNew->tkOwner = PASM->m_tkCurrentCVOwner; if(pNew->tkOwner) PASM->DefineCV(pNew); else if(PASM->m_pCustomDescrList) PASM->m_pCustomDescrList->PUSH(pNew); } break; case 86: #line 373 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 87: #line 374 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); } break; case 88: #line 375 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TAGGED_OBJECT); } break; case 89: #line 376 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); AppendStringWithLength(yyval.binstr,yypvt[-0].string); } break; case 90: #line 378 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); AppendStringWithLength(yyval.binstr,PASM->ReflectionNotation(yypvt[-0].token)); } break; case 91: #line 380 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 92: #line 385 "asmparse.y" { PASMM->SetModuleName(NULL); PASM->m_tkCurrentCVOwner=1; } break; case 93: #line 386 "asmparse.y" { PASMM->SetModuleName(yypvt[-0].string); PASM->m_tkCurrentCVOwner=1; } break; case 94: #line 387 "asmparse.y" { BinStr* pbs = new BinStr(); unsigned L = (unsigned)strlen(yypvt[-0].string); memcpy((char*)(pbs->getBuff(L)),yypvt[-0].string,L); PASM->EmitImport(pbs); delete pbs;} break; case 95: #line 394 "asmparse.y" { /*PASM->SetDataSection(); PASM->EmitDataLabel($7);*/ PASM->m_VTFList.PUSH(new 
VTFEntry((USHORT)yypvt[-4].int32, (USHORT)yypvt[-2].int32, yypvt[-0].string)); } break; case 96: #line 398 "asmparse.y" { yyval.int32 = 0; } break; case 97: #line 399 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_32BIT; } break; case 98: #line 400 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_64BIT; } break; case 99: #line 401 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_FROM_UNMANAGED; } break; case 100: #line 402 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_CALL_MOST_DERIVED; } break; case 101: #line 403 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN; } break; case 102: #line 406 "asmparse.y" { PASM->m_pVTable = yypvt[-1].binstr; } break; case 103: #line 409 "asmparse.y" { bParsingByteArray = TRUE; } break; case 104: #line 413 "asmparse.y" { PASM->StartNameSpace(yypvt[-0].string); } break; case 105: #line 416 "asmparse.y" { newclass = TRUE; } break; case 106: #line 419 "asmparse.y" { if(yypvt[-0].typarlist) FixupConstraints(); PASM->StartClass(yypvt[-1].string, yypvt[-2].classAttr, yypvt[-0].typarlist); TyParFixupList.RESET(false); newclass = FALSE; } break; case 107: #line 425 "asmparse.y" { PASM->AddClass(); } break; case 108: #line 428 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) 0; } break; case 109: #line 429 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdVisibilityMask) | tdPublic); } break; case 110: #line 430 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdVisibilityMask) | tdNotPublic); } break; case 111: #line 431 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | 0x80000000 | tdSealed); } break; case 112: #line 432 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | 0x40000000); } break; case 113: #line 433 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdInterface | tdAbstract); } break; case 114: #line 434 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdSealed); } break; case 115: #line 435 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdAbstract); } break; case 116: #line 436 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdLayoutMask) | tdAutoLayout); } break; case 117: #line 437 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdLayoutMask) | tdSequentialLayout); } break; case 118: #line 438 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdLayoutMask) | tdExplicitLayout); } break; case 119: #line 439 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdStringFormatMask) | tdAnsiClass); } break; case 120: #line 440 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdStringFormatMask) | tdUnicodeClass); } break; case 121: #line 441 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-1].classAttr & ~tdStringFormatMask) | tdAutoClass); } break; case 122: #line 442 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdImport); } break; case 123: #line 443 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdSerializable); } break; case 124: #line 444 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdWindowsRuntime); } break; case 125: #line 445 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedPublic); } break; case 126: #line 446 "asmparse.y" { 
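/* Editorial note (added; not generated by yacc): cases 125-130, of which
** this is the second, all use the same read-modify-write pattern for the
** nested visibility keywords: clear the tdVisibilityMask bits accumulated
** so far, then OR in the single CorRegTypeAttr nesting flag named by the
** keyword, conceptually
**
**     attr = (attr & ~tdVisibilityMask) | tdNestedPrivate;
**
** so the last visibility keyword on a .class header wins. */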
yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedPrivate); } break; case 127: #line 447 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedFamily); } break; case 128: #line 448 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedAssembly); } break; case 129: #line 449 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedFamANDAssem); } break; case 130: #line 450 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) ((yypvt[-2].classAttr & ~tdVisibilityMask) | tdNestedFamORAssem); } break; case 131: #line 451 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdBeforeFieldInit); } break; case 132: #line 452 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr | tdSpecialName); } break; case 133: #line 453 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].classAttr); } break; case 134: #line 454 "asmparse.y" { yyval.classAttr = (CorRegTypeAttr) (yypvt[-1].int32); } break; case 136: #line 458 "asmparse.y" { PASM->m_crExtends = yypvt[-0].token; } break; case 141: #line 469 "asmparse.y" { PASM->AddToImplList(yypvt[-0].token); } break; case 142: #line 470 "asmparse.y" { PASM->AddToImplList(yypvt[-0].token); } break; case 143: #line 474 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 144: #line 475 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 145: #line 478 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(yypvt[-0].token); } break; case 146: #line 479 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->appendInt32(yypvt[-0].token); } break; case 147: #line 482 "asmparse.y" { yyval.typarlist = NULL; PASM->m_TyParList = NULL;} break; case 148: #line 483 "asmparse.y" { yyval.typarlist = yypvt[-1].typarlist; PASM->m_TyParList = yypvt[-1].typarlist;} break; case 149: #line 486 "asmparse.y" { yyval.int32 = gpCovariant; } break; case 150: #line 487 "asmparse.y" { yyval.int32 = gpContravariant; } break; case 151: #line 488 "asmparse.y" { yyval.int32 = gpReferenceTypeConstraint; } break; case 152: #line 489 "asmparse.y" { yyval.int32 = gpNotNullableValueTypeConstraint; } break; case 153: #line 490 "asmparse.y" { yyval.int32 = gpDefaultConstructorConstraint; } break; case 154: #line 493 "asmparse.y" { yyval.int32 = 0; } break; case 155: #line 494 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | yypvt[-0].int32; } break; case 156: #line 497 "asmparse.y" {yyval.typarlist = new TyParList(yypvt[-3].int32, yypvt[-2].binstr, yypvt[-1].string, yypvt[-0].typarlist);} break; case 157: #line 498 "asmparse.y" {yyval.typarlist = new TyParList(yypvt[-2].int32, NULL, yypvt[-1].string, yypvt[-0].typarlist);} break; case 158: #line 501 "asmparse.y" { yyval.typarlist = NULL; } break; case 159: #line 502 "asmparse.y" { yyval.typarlist = yypvt[-0].typarlist; } break; case 160: #line 505 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 161: #line 508 "asmparse.y" { yyval.int32= 0; } break; case 162: #line 509 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 163: #line 512 "asmparse.y" { yyval.int32 = yypvt[-2].int32; } break; case 164: #line 516 "asmparse.y" { if(PASM->m_pCurMethod->m_ulLines[1] ==0) { PASM->m_pCurMethod->m_ulLines[1] = PASM->m_ulCurLine; PASM->m_pCurMethod->m_ulColumns[1]=PASM->m_ulCurColumn;} PASM->EndMethod(); } break; case 165: #line 520 "asmparse.y" { PASM->EndClass(); } break; case 166: 
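/* Editorial note (added; not generated by yacc): cases 164-167 close the
** innermost open scope (method, class, event, and property, respectively),
** each pairing an End* call with the directive that opened that scope. */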
#line 521 "asmparse.y" { PASM->EndEvent(); } break; case 167: #line 522 "asmparse.y" { PASM->EndProp(); } break; case 173: #line 528 "asmparse.y" { PASM->m_pCurClass->m_ulSize = yypvt[-0].int32; } break; case 174: #line 529 "asmparse.y" { PASM->m_pCurClass->m_ulPack = yypvt[-0].int32; } break; case 175: #line 530 "asmparse.y" { PASMM->EndComType(); } break; case 176: #line 532 "asmparse.y" { BinStr *sig1 = parser->MakeSig(yypvt[-7].int32, yypvt[-6].binstr, yypvt[-1].binstr); BinStr *sig2 = new BinStr(); sig2->append(sig1); PASM->AddMethodImpl(yypvt[-11].token,yypvt[-9].string,sig1,yypvt[-5].token,yypvt[-3].string,sig2); PASM->ResetArgNameList(); } break; case 177: #line 538 "asmparse.y" { PASM->AddMethodImpl(yypvt[-17].token,yypvt[-15].string, (yypvt[-14].int32==0 ? parser->MakeSig(yypvt[-19].int32,yypvt[-18].binstr,yypvt[-12].binstr) : parser->MakeSig(yypvt[-19].int32| IMAGE_CEE_CS_CALLCONV_GENERIC,yypvt[-18].binstr,yypvt[-12].binstr,yypvt[-14].int32)), yypvt[-6].token,yypvt[-4].string, (yypvt[-3].int32==0 ? parser->MakeSig(yypvt[-8].int32,yypvt[-7].binstr,yypvt[-1].binstr) : parser->MakeSig(yypvt[-8].int32| IMAGE_CEE_CS_CALLCONV_GENERIC,yypvt[-7].binstr,yypvt[-1].binstr,yypvt[-3].int32))); PASM->ResetArgNameList(); } break; case 180: #line 548 "asmparse.y" { if((yypvt[-1].int32 > 0) && (yypvt[-1].int32 <= (int)PASM->m_pCurClass->m_NumTyPars)) PASM->m_pCustomDescrList = PASM->m_pCurClass->m_TyPars[yypvt[-1].int32-1].CAList(); else PASM->report->error("Type parameter index out of range\n"); } break; case 181: #line 553 "asmparse.y" { int n = PASM->m_pCurClass->FindTyPar(yypvt[-0].string); if(n >= 0) PASM->m_pCustomDescrList = PASM->m_pCurClass->m_TyPars[n].CAList(); else PASM->report->error("Type parameter '%s' undefined\n",yypvt[-0].string); } break; case 182: #line 559 "asmparse.y" { PASM->AddGenericParamConstraint(yypvt[-3].int32, 0, yypvt[-0].token); } break; case 183: #line 560 "asmparse.y" { PASM->AddGenericParamConstraint(0, yypvt[-2].string, yypvt[-0].token); } break; case 184: #line 561 "asmparse.y" { yypvt[-0].cad->tkInterfacePair = yypvt[-1].token; if(PASM->m_pCustomDescrList) PASM->m_pCustomDescrList->PUSH(yypvt[-0].cad); } break; case 185: #line 569 "asmparse.y" { yypvt[-3].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); PASM->AddField(yypvt[-2].string, yypvt[-3].binstr, yypvt[-4].fieldAttr, yypvt[-1].string, yypvt[-0].binstr, yypvt[-5].int32); } break; case 186: #line 573 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) 0; } break; case 187: #line 574 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdStatic); } break; case 188: #line 575 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdPublic); } break; case 189: #line 576 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdPrivate); } break; case 190: #line 577 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdFamily); } break; case 191: #line 578 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdInitOnly); } break; case 192: #line 579 "asmparse.y" { yyval.fieldAttr = yypvt[-1].fieldAttr; } break; case 193: #line 580 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdSpecialName); } break; case 194: #line 593 "asmparse.y" { PASM->m_pMarshal = yypvt[-1].binstr; } break; case 195: #line 594 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdAssembly); } break; case 196: #line 
595 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdFamANDAssem); } break; case 197: #line 596 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdFamORAssem); } break; case 198: #line 597 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) ((yypvt[-1].fieldAttr & ~mdMemberAccessMask) | fdPrivateScope); } break; case 199: #line 598 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdLiteral); } break; case 200: #line 599 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].fieldAttr | fdNotSerialized); } break; case 201: #line 600 "asmparse.y" { yyval.fieldAttr = (CorFieldAttr) (yypvt[-1].int32); } break; case 202: #line 603 "asmparse.y" { yyval.string = 0; } break; case 203: #line 604 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 204: #line 607 "asmparse.y" { yyval.binstr = NULL; } break; case 205: #line 608 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 206: #line 611 "asmparse.y" { yyval.int32 = 0xFFFFFFFF; } break; case 207: #line 612 "asmparse.y" { yyval.int32 = yypvt[-1].int32; } break; case 208: #line 617 "asmparse.y" { PASM->ResetArgNameList(); if (yypvt[-3].binstr == NULL) { if((iCallConv)&&((yypvt[-8].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(yypvt[-6].token, yypvt[-4].string, parser->MakeSig(yypvt[-8].int32|iCallConv, yypvt[-7].binstr, yypvt[-1].binstr)); } else { mdToken mr; if((iCallConv)&&((yypvt[-8].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); mr = PASM->MakeMemberRef(yypvt[-6].token, yypvt[-4].string, parser->MakeSig(yypvt[-8].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-7].binstr, yypvt[-1].binstr, corCountArgs(yypvt[-3].binstr))); yyval.token = PASM->MakeMethodSpec(mr, parser->MakeSig(IMAGE_CEE_CS_CALLCONV_INSTANTIATION, 0, yypvt[-3].binstr)); } } break; case 209: #line 634 "asmparse.y" { PASM->ResetArgNameList(); if((iCallConv)&&((yypvt[-8].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(yypvt[-6].token, yypvt[-4].string, parser->MakeSig(yypvt[-8].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-7].binstr, yypvt[-1].binstr, yypvt[-3].int32)); } break; case 210: #line 640 "asmparse.y" { PASM->ResetArgNameList(); if (yypvt[-3].binstr == NULL) { if((iCallConv)&&((yypvt[-6].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(mdTokenNil, yypvt[-4].string, parser->MakeSig(yypvt[-6].int32|iCallConv, yypvt[-5].binstr, yypvt[-1].binstr)); } else { mdToken mr; if((iCallConv)&&((yypvt[-6].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); mr = PASM->MakeMemberRef(mdTokenNil, yypvt[-4].string, parser->MakeSig(yypvt[-6].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-5].binstr, yypvt[-1].binstr, corCountArgs(yypvt[-3].binstr))); yyval.token = PASM->MakeMethodSpec(mr, parser->MakeSig(IMAGE_CEE_CS_CALLCONV_INSTANTIATION, 0, yypvt[-3].binstr)); } } break; case 211: #line 656 "asmparse.y" { PASM->ResetArgNameList(); if((iCallConv)&&((yypvt[-6].int32 & iCallConv) != iCallConv)) parser->warn("'instance' added to method's calling convention\n"); yyval.token = PASM->MakeMemberRef(mdTokenNil, yypvt[-4].string, parser->MakeSig(yypvt[-6].int32 | 
IMAGE_CEE_CS_CALLCONV_GENERIC|iCallConv, yypvt[-5].binstr, yypvt[-1].binstr, yypvt[-3].int32)); } break; case 212: #line 660 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 213: #line 661 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; } break; case 214: #line 662 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; } break; case 215: #line 665 "asmparse.y" { yyval.int32 = (yypvt[-0].int32 | IMAGE_CEE_CS_CALLCONV_HASTHIS); } break; case 216: #line 666 "asmparse.y" { yyval.int32 = (yypvt[-0].int32 | IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS); } break; case 217: #line 667 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 218: #line 668 "asmparse.y" { yyval.int32 = yypvt[-1].int32; } break; case 219: #line 671 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_DEFAULT; } break; case 220: #line 672 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_DEFAULT; } break; case 221: #line 673 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_VARARG; } break; case 222: #line 674 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_C; } break; case 223: #line 675 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_STDCALL; } break; case 224: #line 676 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_THISCALL; } break; case 225: #line 677 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_FASTCALL; } break; case 226: #line 678 "asmparse.y" { yyval.int32 = IMAGE_CEE_CS_CALLCONV_UNMANAGED; } break; case 227: #line 681 "asmparse.y" { yyval.token = yypvt[-1].int32; } break; case 228: #line 684 "asmparse.y" { yyval.token = yypvt[-0].token; PASM->delArgNameList(PASM->m_firstArgName); PASM->m_firstArgName = parser->m_ANSFirst.POP(); PASM->m_lastArgName = parser->m_ANSLast.POP(); PASM->SetMemberRefFixup(yypvt[-0].token,iOpcodeLen); } break; case 229: #line 690 "asmparse.y" { yypvt[-3].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); yyval.token = PASM->MakeMemberRef(yypvt[-2].token, yypvt[-0].string, yypvt[-3].binstr); PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 230: #line 694 "asmparse.y" { yypvt[-1].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); yyval.token = PASM->MakeMemberRef(NULL, yypvt[-0].string, yypvt[-1].binstr); PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 231: #line 697 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 232: #line 699 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 233: #line 701 "asmparse.y" { yyval.token = yypvt[-0].token; PASM->SetMemberRefFixup(yyval.token,iOpcodeLen); } break; case 234: #line 706 "asmparse.y" { PASM->ResetEvent(yypvt[-0].string, yypvt[-1].token, yypvt[-2].eventAttr); } break; case 235: #line 707 "asmparse.y" { PASM->ResetEvent(yypvt[-0].string, mdTypeRefNil, yypvt[-1].eventAttr); } break; case 236: #line 711 "asmparse.y" { yyval.eventAttr = (CorEventAttr) 0; } break; case 237: #line 712 "asmparse.y" { yyval.eventAttr = yypvt[-1].eventAttr; } break; case 238: #line 713 "asmparse.y" { yyval.eventAttr = (CorEventAttr) (yypvt[-1].eventAttr | evSpecialName); } break; case 241: #line 720 "asmparse.y" { PASM->SetEventMethod(0, yypvt[-0].token); } break; case 242: #line 721 "asmparse.y" { PASM->SetEventMethod(1, yypvt[-0].token); } break; case 243: #line 722 "asmparse.y" { PASM->SetEventMethod(2, yypvt[-0].token); } break; case 244: #line 723 "asmparse.y" { PASM->SetEventMethod(3, yypvt[-0].token); } break; case 249: #line 732 "asmparse.y" { 
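/* Editorial note (added; not generated by yacc): a property signature is
** encoded like a method signature, so only the HASTHIS bit of the declared
** calling convention survives; the remaining bits are replaced with
** IMAGE_CEE_CS_CALLCONV_PROPERTY before parser->MakeSig builds the
** signature blob handed to ResetProp below. */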
PASM->ResetProp(yypvt[-4].string, parser->MakeSig((IMAGE_CEE_CS_CALLCONV_PROPERTY | (yypvt[-6].int32 & IMAGE_CEE_CS_CALLCONV_HASTHIS)),yypvt[-5].binstr,yypvt[-2].binstr), yypvt[-7].propAttr, yypvt[-0].binstr);} break; case 250: #line 737 "asmparse.y" { yyval.propAttr = (CorPropertyAttr) 0; } break; case 251: #line 738 "asmparse.y" { yyval.propAttr = yypvt[-1].propAttr; } break; case 252: #line 739 "asmparse.y" { yyval.propAttr = (CorPropertyAttr) (yypvt[-1].propAttr | prSpecialName); } break; case 255: #line 747 "asmparse.y" { PASM->SetPropMethod(0, yypvt[-0].token); } break; case 256: #line 748 "asmparse.y" { PASM->SetPropMethod(1, yypvt[-0].token); } break; case 257: #line 749 "asmparse.y" { PASM->SetPropMethod(2, yypvt[-0].token); } break; case 262: #line 757 "asmparse.y" { PASM->ResetForNextMethod(); uMethodBeginLine = PASM->m_ulCurLine; uMethodBeginColumn=PASM->m_ulCurColumn; } break; case 263: #line 763 "asmparse.y" { yyval.binstr = NULL; } break; case 264: #line 764 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 265: #line 767 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 266: #line 768 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 267: #line 771 "asmparse.y" { bParsingByteArray = TRUE; } break; case 268: #line 775 "asmparse.y" { BinStr* sig; if (yypvt[-5].typarlist == NULL) sig = parser->MakeSig(yypvt[-10].int32, yypvt[-8].binstr, yypvt[-3].binstr); else { FixupTyPars(yypvt[-8].binstr); sig = parser->MakeSig(yypvt[-10].int32 | IMAGE_CEE_CS_CALLCONV_GENERIC, yypvt[-8].binstr, yypvt[-3].binstr, yypvt[-5].typarlist->Count()); FixupConstraints(); } PASM->StartMethod(yypvt[-6].string, sig, yypvt[-11].methAttr, yypvt[-7].binstr, yypvt[-9].int32, yypvt[-5].typarlist); TyParFixupList.RESET(false); PASM->SetImplAttr((USHORT)yypvt[-1].implAttr); PASM->m_pCurMethod->m_ulLines[0] = uMethodBeginLine; PASM->m_pCurMethod->m_ulColumns[0]=uMethodBeginColumn; } break; case 269: #line 790 "asmparse.y" { yyval.methAttr = (CorMethodAttr) 0; } break; case 270: #line 791 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdStatic); } break; case 271: #line 792 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdPublic); } break; case 272: #line 793 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdPrivate); } break; case 273: #line 794 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdFamily); } break; case 274: #line 795 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdFinal); } break; case 275: #line 796 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdSpecialName); } break; case 276: #line 797 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdVirtual); } break; case 277: #line 798 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdCheckAccessOnOverride); } break; case 278: #line 799 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdAbstract); } break; case 279: #line 800 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdAssem); } break; case 280: #line 801 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdFamANDAssem); } break; case 281: #line 802 "asmparse.y" { yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdFamORAssem); } break; case 282: #line 803 "asmparse.y" { 
yyval.methAttr = (CorMethodAttr) ((yypvt[-1].methAttr & ~mdMemberAccessMask) | mdPrivateScope); } break; case 283: #line 804 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdHideBySig); } break; case 284: #line 805 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdNewSlot); } break; case 285: #line 806 "asmparse.y" { yyval.methAttr = yypvt[-1].methAttr; } break; case 286: #line 807 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdUnmanagedExport); } break; case 287: #line 808 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].methAttr | mdRequireSecObject); } break; case 288: #line 809 "asmparse.y" { yyval.methAttr = (CorMethodAttr) (yypvt[-1].int32); } break; case 289: #line 811 "asmparse.y" { PASM->SetPinvoke(yypvt[-4].binstr,0,yypvt[-2].binstr,yypvt[-1].pinvAttr); yyval.methAttr = (CorMethodAttr) (yypvt[-7].methAttr | mdPinvokeImpl); } break; case 290: #line 814 "asmparse.y" { PASM->SetPinvoke(yypvt[-2].binstr,0,NULL,yypvt[-1].pinvAttr); yyval.methAttr = (CorMethodAttr) (yypvt[-5].methAttr | mdPinvokeImpl); } break; case 291: #line 817 "asmparse.y" { PASM->SetPinvoke(new BinStr(),0,NULL,yypvt[-1].pinvAttr); yyval.methAttr = (CorMethodAttr) (yypvt[-4].methAttr | mdPinvokeImpl); } break; case 292: #line 821 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) 0; } break; case 293: #line 822 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmNoMangle); } break; case 294: #line 823 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCharSetAnsi); } break; case 295: #line 824 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCharSetUnicode); } break; case 296: #line 825 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCharSetAuto); } break; case 297: #line 826 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmSupportsLastError); } break; case 298: #line 827 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvWinapi); } break; case 299: #line 828 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvCdecl); } break; case 300: #line 829 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvStdcall); } break; case 301: #line 830 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvThiscall); } break; case 302: #line 831 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].pinvAttr | pmCallConvFastcall); } break; case 303: #line 832 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmBestFitEnabled); } break; case 304: #line 833 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmBestFitDisabled); } break; case 305: #line 834 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmThrowOnUnmappableCharEnabled); } break; case 306: #line 835 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-3].pinvAttr | pmThrowOnUnmappableCharDisabled); } break; case 307: #line 836 "asmparse.y" { yyval.pinvAttr = (CorPinvokeMap) (yypvt[-1].int32); } break; case 308: #line 839 "asmparse.y" { yyval.string = newString(COR_CTOR_METHOD_NAME); } break; case 309: #line 840 "asmparse.y" { yyval.string = newString(COR_CCTOR_METHOD_NAME); } break; case 310: #line 841 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 311: #line 844 "asmparse.y" { yyval.int32 = 0; } break; case 312: #line 845 "asmparse.y" { yyval.int32 = yypvt[-3].int32 | pdIn; } break; case 313: 
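/* Editorial note (added; not generated by yacc): cases 312-314 OR the
** pdIn / pdOut / pdOptional flags into the accumulated parameter-attribute
** word, and case 315 records an explicit int32 as (value + 1), presumably
** so that 0 can keep meaning "no attribute specified". */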
#line 846 "asmparse.y" { yyval.int32 = yypvt[-3].int32 | pdOut; } break; case 314: #line 847 "asmparse.y" { yyval.int32 = yypvt[-3].int32 | pdOptional; } break; case 315: #line 848 "asmparse.y" { yyval.int32 = yypvt[-1].int32 + 1; } break; case 316: #line 851 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (miIL | miManaged); } break; case 317: #line 852 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFF4) | miNative); } break; case 318: #line 853 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFF4) | miIL); } break; case 319: #line 854 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFF4) | miOPTIL); } break; case 320: #line 855 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFFB) | miManaged); } break; case 321: #line 856 "asmparse.y" { yyval.implAttr = (CorMethodImpl) ((yypvt[-1].implAttr & 0xFFFB) | miUnmanaged); } break; case 322: #line 857 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miForwardRef); } break; case 323: #line 858 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miPreserveSig); } break; case 324: #line 859 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miRuntime); } break; case 325: #line 860 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miInternalCall); } break; case 326: #line 861 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miSynchronized); } break; case 327: #line 862 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miNoInlining); } break; case 328: #line 863 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miAggressiveInlining); } break; case 329: #line 864 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miNoOptimization); } break; case 330: #line 865 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].implAttr | miAggressiveOptimization); } break; case 331: #line 866 "asmparse.y" { yyval.implAttr = (CorMethodImpl) (yypvt[-1].int32); } break; case 332: #line 869 "asmparse.y" { PASM->delArgNameList(PASM->m_firstArgName); PASM->m_firstArgName = NULL;PASM->m_lastArgName = NULL; } break; case 335: #line 877 "asmparse.y" { PASM->EmitByte(yypvt[-0].int32); } break; case 336: #line 878 "asmparse.y" { delete PASM->m_SEHD; PASM->m_SEHD = PASM->m_SEHDstack.POP(); } break; case 337: #line 879 "asmparse.y" { PASM->EmitMaxStack(yypvt[-0].int32); } break; case 338: #line 880 "asmparse.y" { PASM->EmitLocals(parser->MakeSig(IMAGE_CEE_CS_CALLCONV_LOCAL_SIG, 0, yypvt[-1].binstr)); } break; case 339: #line 882 "asmparse.y" { PASM->EmitZeroInit(); PASM->EmitLocals(parser->MakeSig(IMAGE_CEE_CS_CALLCONV_LOCAL_SIG, 0, yypvt[-1].binstr)); } break; case 340: #line 885 "asmparse.y" { PASM->EmitEntryPoint(); } break; case 341: #line 886 "asmparse.y" { PASM->EmitZeroInit(); } break; case 344: #line 889 "asmparse.y" { PASM->AddLabel(PASM->m_CurPC,yypvt[-1].string); /*PASM->EmitLabel($1);*/ } break; case 350: #line 895 "asmparse.y" { if(PASM->m_pCurMethod->m_dwExportOrdinal == 0xFFFFFFFF) { PASM->m_pCurMethod->m_dwExportOrdinal = yypvt[-1].int32; PASM->m_pCurMethod->m_szExportAlias = NULL; if(PASM->m_pCurMethod->m_wVTEntry == 0) PASM->m_pCurMethod->m_wVTEntry = 1; if(PASM->m_pCurMethod->m_wVTSlot == 0) PASM->m_pCurMethod->m_wVTSlot = yypvt[-1].int32 + 0x8000; } else PASM->report->warn("Duplicate .export directive, ignored\n"); } break; case 351: #line 905 "asmparse.y" { 
if(PASM->m_pCurMethod->m_dwExportOrdinal == 0xFFFFFFFF) { PASM->m_pCurMethod->m_dwExportOrdinal = yypvt[-3].int32; PASM->m_pCurMethod->m_szExportAlias = yypvt[-0].string; if(PASM->m_pCurMethod->m_wVTEntry == 0) PASM->m_pCurMethod->m_wVTEntry = 1; if(PASM->m_pCurMethod->m_wVTSlot == 0) PASM->m_pCurMethod->m_wVTSlot = yypvt[-3].int32 + 0x8000; } else PASM->report->warn("Duplicate .export directive, ignored\n"); } break; case 352: #line 915 "asmparse.y" { PASM->m_pCurMethod->m_wVTEntry = (WORD)yypvt[-2].int32; PASM->m_pCurMethod->m_wVTSlot = (WORD)yypvt[-0].int32; } break; case 353: #line 918 "asmparse.y" { PASM->AddMethodImpl(yypvt[-2].token,yypvt[-0].string,NULL,NULL,NULL,NULL); } break; case 354: #line 921 "asmparse.y" { PASM->AddMethodImpl(yypvt[-6].token,yypvt[-4].string, (yypvt[-3].int32==0 ? parser->MakeSig(yypvt[-8].int32,yypvt[-7].binstr,yypvt[-1].binstr) : parser->MakeSig(yypvt[-8].int32| IMAGE_CEE_CS_CALLCONV_GENERIC,yypvt[-7].binstr,yypvt[-1].binstr,yypvt[-3].int32)) ,NULL,NULL,NULL); PASM->ResetArgNameList(); } break; case 356: #line 928 "asmparse.y" { if((yypvt[-1].int32 > 0) && (yypvt[-1].int32 <= (int)PASM->m_pCurMethod->m_NumTyPars)) PASM->m_pCustomDescrList = PASM->m_pCurMethod->m_TyPars[yypvt[-1].int32-1].CAList(); else PASM->report->error("Type parameter index out of range\n"); } break; case 357: #line 933 "asmparse.y" { int n = PASM->m_pCurMethod->FindTyPar(yypvt[-0].string); if(n >= 0) PASM->m_pCustomDescrList = PASM->m_pCurMethod->m_TyPars[n].CAList(); else PASM->report->error("Type parameter '%s' undefined\n",yypvt[-0].string); } break; case 358: #line 939 "asmparse.y" { PASM->m_pCurMethod->AddGenericParamConstraint(yypvt[-3].int32, 0, yypvt[-0].token); } break; case 359: #line 940 "asmparse.y" { PASM->m_pCurMethod->AddGenericParamConstraint(0, yypvt[-2].string, yypvt[-0].token); } break; case 360: #line 943 "asmparse.y" { if( yypvt[-2].int32 ) { ARG_NAME_LIST* pAN=PASM->findArg(PASM->m_pCurMethod->m_firstArgName, yypvt[-2].int32 - 1); if(pAN) { PASM->m_pCustomDescrList = &(pAN->CustDList); pAN->pValue = yypvt[-0].binstr; } else { PASM->m_pCustomDescrList = NULL; if(yypvt[-0].binstr) delete yypvt[-0].binstr; } } else { PASM->m_pCustomDescrList = &(PASM->m_pCurMethod->m_RetCustDList); PASM->m_pCurMethod->m_pRetValue = yypvt[-0].binstr; } PASM->m_tkCurrentCVOwner = 0; } break; case 361: #line 963 "asmparse.y" { PASM->m_pCurMethod->CloseScope(); } break; case 362: #line 966 "asmparse.y" { PASM->m_pCurMethod->OpenScope(); } break; case 366: #line 977 "asmparse.y" { PASM->m_SEHD->tryTo = PASM->m_CurPC; } break; case 367: #line 978 "asmparse.y" { PASM->SetTryLabels(yypvt[-2].string, yypvt[-0].string); } break; case 368: #line 979 "asmparse.y" { if(PASM->m_SEHD) {PASM->m_SEHD->tryFrom = yypvt[-2].int32; PASM->m_SEHD->tryTo = yypvt[-0].int32;} } break; case 369: #line 983 "asmparse.y" { PASM->NewSEHDescriptor(); PASM->m_SEHD->tryFrom = PASM->m_CurPC; } break; case 370: #line 988 "asmparse.y" { PASM->EmitTry(); } break; case 371: #line 989 "asmparse.y" { PASM->EmitTry(); } break; case 372: #line 990 "asmparse.y" { PASM->EmitTry(); } break; case 373: #line 991 "asmparse.y" { PASM->EmitTry(); } break; case 374: #line 995 "asmparse.y" { PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 375: #line 996 "asmparse.y" { PASM->SetFilterLabel(yypvt[-0].string); PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 376: #line 998 "asmparse.y" { PASM->m_SEHD->sehFilter = yypvt[-0].int32; PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 377: #line 1002 "asmparse.y" { 
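// [annotation] SEH productions, cases 366-383: try and handler extents are
// recorded in PASM->m_SEHD either as current-PC offsets, label names, or
// explicit integer offsets, and the clause kind is chosen by the
// COR_ILEXCEPTION_CLAUSE_* assignments that follow (filter here, catch =
// CLAUSE_NONE plus SetCatchClass, then finally and fault).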
PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_FILTER; PASM->m_SEHD->sehFilter = PASM->m_CurPC; } break; case 378: #line 1006 "asmparse.y" { PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_NONE; PASM->SetCatchClass(yypvt[-0].token); PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 379: #line 1011 "asmparse.y" { PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_FINALLY; PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 380: #line 1015 "asmparse.y" { PASM->m_SEHD->sehClause = COR_ILEXCEPTION_CLAUSE_FAULT; PASM->m_SEHD->sehHandler = PASM->m_CurPC; } break; case 381: #line 1019 "asmparse.y" { PASM->m_SEHD->sehHandlerTo = PASM->m_CurPC; } break; case 382: #line 1020 "asmparse.y" { PASM->SetHandlerLabels(yypvt[-2].string, yypvt[-0].string); } break; case 383: #line 1021 "asmparse.y" { PASM->m_SEHD->sehHandler = yypvt[-2].int32; PASM->m_SEHD->sehHandlerTo = yypvt[-0].int32; } break; case 385: #line 1029 "asmparse.y" { PASM->EmitDataLabel(yypvt[-1].string); } break; case 387: #line 1033 "asmparse.y" { PASM->SetDataSection(); } break; case 388: #line 1034 "asmparse.y" { PASM->SetTLSSection(); } break; case 389: #line 1035 "asmparse.y" { PASM->SetILSection(); } break; case 394: #line 1046 "asmparse.y" { yyval.int32 = 1; } break; case 395: #line 1047 "asmparse.y" { yyval.int32 = yypvt[-1].int32; if(yypvt[-1].int32 <= 0) { PASM->report->error("Illegal item count: %d\n",yypvt[-1].int32); if(!PASM->OnErrGo) yyval.int32 = 1; }} break; case 396: #line 1052 "asmparse.y" { PASM->EmitDataString(yypvt[-1].binstr); } break; case 397: #line 1053 "asmparse.y" { PASM->EmitDD(yypvt[-1].string); } break; case 398: #line 1054 "asmparse.y" { PASM->EmitData(yypvt[-1].binstr->ptr(),yypvt[-1].binstr->length()); } break; case 399: #line 1056 "asmparse.y" { float f = (float) (*yypvt[-2].float64); float* p = new (nothrow) float[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i < yypvt[-0].int32; i++) p[i] = f; PASM->EmitData(p, sizeof(float)*yypvt[-0].int32); delete yypvt[-2].float64; delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(float)*yypvt[-0].int32); } break; case 400: #line 1063 "asmparse.y" { double* p = new (nothrow) double[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i<yypvt[-0].int32; i++) p[i] = *(yypvt[-2].float64); PASM->EmitData(p, sizeof(double)*yypvt[-0].int32); delete yypvt[-2].float64; delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(double)*yypvt[-0].int32); } break; case 401: #line 1070 "asmparse.y" { __int64* p = new (nothrow) __int64[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i<yypvt[-0].int32; i++) p[i] = *(yypvt[-2].int64); PASM->EmitData(p, sizeof(__int64)*yypvt[-0].int32); delete yypvt[-2].int64; delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int64)*yypvt[-0].int32); } break; case 402: #line 1077 "asmparse.y" { __int32* p = new (nothrow) __int32[yypvt[-0].int32]; if(p != NULL) { for(int i=0; i<yypvt[-0].int32; i++) p[i] = yypvt[-2].int32; PASM->EmitData(p, sizeof(__int32)*yypvt[-0].int32); delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int32)*yypvt[-0].int32); } break; case 403: #line 1084 "asmparse.y" { __int16 i = (__int16) yypvt[-2].int32; FAIL_UNLESS(i == yypvt[-2].int32, ("Value %d too big\n", yypvt[-2].int32)); __int16* p = new (nothrow) __int16[yypvt[-0].int32]; if(p != NULL) { for(int j=0; j<yypvt[-0].int32; j++) p[j] = i; PASM->EmitData(p, sizeof(__int16)*yypvt[-0].int32); 
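// [annotation] Repeated-data emission, cases 399-404: allocate a temporary
// array of the requested element count with new(nothrow), fill it with the
// repeated constant, pass it to PASM->EmitData, then free it; a failed
// allocation is reported through PASM->report->error instead of crashing.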
delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int16)*yypvt[-0].int32); } break; case 404: #line 1092 "asmparse.y" { __int8 i = (__int8) yypvt[-2].int32; FAIL_UNLESS(i == yypvt[-2].int32, ("Value %d too big\n", yypvt[-2].int32)); __int8* p = new (nothrow) __int8[yypvt[-0].int32]; if(p != NULL) { for(int j=0; j<yypvt[-0].int32; j++) p[j] = i; PASM->EmitData(p, sizeof(__int8)*yypvt[-0].int32); delete [] p; } else PASM->report->error("Out of memory emitting data block %d bytes\n", sizeof(__int8)*yypvt[-0].int32); } break; case 405: #line 1099 "asmparse.y" { PASM->EmitData(NULL, sizeof(float)*yypvt[-0].int32); } break; case 406: #line 1100 "asmparse.y" { PASM->EmitData(NULL, sizeof(double)*yypvt[-0].int32); } break; case 407: #line 1101 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int64)*yypvt[-0].int32); } break; case 408: #line 1102 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int32)*yypvt[-0].int32); } break; case 409: #line 1103 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int16)*yypvt[-0].int32); } break; case 410: #line 1104 "asmparse.y" { PASM->EmitData(NULL, sizeof(__int8)*yypvt[-0].int32); } break; case 411: #line 1108 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R4); float f = (float)(*yypvt[-1].float64); yyval.binstr->appendInt32(*((__int32*)&f)); delete yypvt[-1].float64; } break; case 412: #line 1111 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].float64); delete yypvt[-1].float64; } break; case 413: #line 1113 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 414: #line 1115 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 415: #line 1117 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 416: #line 1119 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 417: #line 1121 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I2); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 418: #line 1123 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I1); yyval.binstr->appendInt8(yypvt[-1].int32); } break; case 419: #line 1125 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 420: #line 1127 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 421: #line 1129 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 422: #line 1131 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); yyval.binstr->appendInt8(yypvt[-1].int32); } break; case 423: #line 1133 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); yyval.binstr->appendInt64((__int64 *)yypvt[-1].int64); delete yypvt[-1].int64; } break; case 424: #line 1135 "asmparse.y" { yyval.binstr 
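// [annotation] Field-initializer constants, cases 411-429: each production
// builds a BinStr tagged with a leading ELEMENT_TYPE_* byte followed by the
// raw value bytes (appendInt8/16/32/64), matching the shape expected by the
// metadata constant emitters downstream.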
= new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 425: #line 1137 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 426: #line 1139 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); yyval.binstr->appendInt8(yypvt[-1].int32); } break; case 427: #line 1141 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_CHAR); yyval.binstr->appendInt16(yypvt[-1].int32); } break; case 428: #line 1143 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_BOOLEAN); yyval.binstr->appendInt8(yypvt[-1].int32);} break; case 429: #line 1145 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); yyval.binstr->append(yypvt[-1].binstr); delete yypvt[-1].binstr;} break; case 430: #line 1149 "asmparse.y" { bParsingByteArray = TRUE; } break; case 431: #line 1152 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 432: #line 1153 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 433: #line 1156 "asmparse.y" { __int8 i = (__int8) yypvt[-0].int32; yyval.binstr = new BinStr(); yyval.binstr->appendInt8(i); } break; case 434: #line 1157 "asmparse.y" { __int8 i = (__int8) yypvt[-0].int32; yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(i); } break; case 435: #line 1161 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 436: #line 1162 "asmparse.y" { yyval.binstr = BinStrToUnicode(yypvt[-0].binstr,true); yyval.binstr->insertInt8(ELEMENT_TYPE_STRING);} break; case 437: #line 1163 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_CLASS); yyval.binstr->appendInt32(0); } break; case 438: #line 1168 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 439: #line 1169 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); yyval.binstr->appendInt8(0xFF); } break; case 440: #line 1170 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); AppendStringWithLength(yyval.binstr,yypvt[-1].string); delete [] yypvt[-1].string;} break; case 441: #line 1172 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); AppendStringWithLength(yyval.binstr,yypvt[-1].string); delete [] yypvt[-1].string;} break; case 442: #line 1174 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); AppendStringWithLength(yyval.binstr,PASM->ReflectionNotation(yypvt[-1].token));} break; case 443: #line 1176 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_TYPE); yyval.binstr->appendInt8(0xFF); } break; case 444: #line 1177 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(SERIALIZATION_TYPE_TAGGED_OBJECT);} break; case 445: #line 1179 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_R4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 446: #line 1183 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_R8); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 447: #line 1187 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I8); 
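// [annotation] Custom-attribute array arguments, cases 445-463: the element
// list collected so far is prefixed back-to-front via insertInt32/insertInt8
// with the element count, the element's ELEMENT_TYPE_* (or
// SERIALIZATION_TYPE_*) tag, and finally ELEMENT_TYPE_SZARRAY.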
yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 448: #line 1191 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 449: #line 1195 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I2); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 450: #line 1199 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_I1); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 451: #line 1203 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U8); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 452: #line 1207 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 453: #line 1211 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U2); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 454: #line 1215 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U1); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 455: #line 1219 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U8); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 456: #line 1223 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U4); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 457: #line 1227 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U2); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 458: #line 1231 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_U1); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 459: #line 1235 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_CHAR); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 460: #line 1239 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_BOOLEAN); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 461: #line 1243 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(ELEMENT_TYPE_STRING); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 462: #line 1247 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(SERIALIZATION_TYPE_TYPE); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 463: #line 1251 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt32(yypvt[-4].int32); yyval.binstr->insertInt8(SERIALIZATION_TYPE_TAGGED_OBJECT); yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 464: #line 1257 "asmparse.y" { yyval.binstr = 
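// [annotation] Cases 464-488 collect the raw element sequences (float32,
// float64, int64, int32, int16, int8, bytearray, string, and class-name
// lists) that the SZARRAY productions above wrap; an empty BinStr is the
// base case, each element is appended in order, and 0xFF marks a null
// string or type entry.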
new BinStr(); } break; case 465: #line 1258 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; float f = (float) (*yypvt[-0].float64); yyval.binstr->appendInt32(*((__int32*)&f)); delete yypvt[-0].float64; } break; case 466: #line 1260 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt32(yypvt[-0].int32); } break; case 467: #line 1264 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 468: #line 1265 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt64((__int64 *)yypvt[-0].float64); delete yypvt[-0].float64; } break; case 469: #line 1267 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt64((__int64 *)yypvt[-0].int64); delete yypvt[-0].int64; } break; case 470: #line 1271 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 471: #line 1272 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt64((__int64 *)yypvt[-0].int64); delete yypvt[-0].int64; } break; case 472: #line 1276 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 473: #line 1277 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt32(yypvt[-0].int32);} break; case 474: #line 1280 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 475: #line 1281 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt16(yypvt[-0].int32);} break; case 476: #line 1284 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 477: #line 1285 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(yypvt[-0].int32); } break; case 478: #line 1288 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 479: #line 1289 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(yypvt[-0].int32);} break; case 480: #line 1293 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 481: #line 1294 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(0xFF); } break; case 482: #line 1295 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; AppendStringWithLength(yyval.binstr,yypvt[-0].string); delete [] yypvt[-0].string;} break; case 483: #line 1299 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 484: #line 1300 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->appendInt8(0xFF); } break; case 485: #line 1301 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; AppendStringWithLength(yyval.binstr,yypvt[-0].string); delete [] yypvt[-0].string;} break; case 486: #line 1303 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; AppendStringWithLength(yyval.binstr,PASM->ReflectionNotation(yypvt[-0].token));} break; case 487: #line 1307 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 488: #line 1308 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 489: #line 1312 "asmparse.y" { parser->m_ANSFirst.PUSH(PASM->m_firstArgName); parser->m_ANSLast.PUSH(PASM->m_lastArgName); PASM->m_firstArgName = NULL; PASM->m_lastArgName = NULL; } break; case 490: #line 1318 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 491: #line 1321 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 492: #line 1324 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 493: #line 1327 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 494: #line 1330 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 495: #line 1333 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 496: #line 1336 
"asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); if((!PASM->OnErrGo)&& ((yypvt[-0].opcode == CEE_NEWOBJ)|| (yypvt[-0].opcode == CEE_CALLVIRT))) iCallConv = IMAGE_CEE_CS_CALLCONV_HASTHIS; } break; case 497: #line 1344 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 498: #line 1347 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 499: #line 1350 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 500: #line 1353 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 501: #line 1356 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); iOpcodeLen = PASM->OpcodeLen(yyval.instr); } break; case 502: #line 1359 "asmparse.y" { yyval.instr = SetupInstr(yypvt[-0].opcode); } break; case 503: #line 1362 "asmparse.y" { yyval.instr = yypvt[-1].instr; bParsingByteArray = TRUE; } break; case 504: #line 1366 "asmparse.y" { PASM->EmitOpcode(yypvt[-0].instr); } break; case 505: #line 1367 "asmparse.y" { PASM->EmitInstrVar(yypvt[-1].instr, yypvt[-0].int32); } break; case 506: #line 1368 "asmparse.y" { PASM->EmitInstrVarByName(yypvt[-1].instr, yypvt[-0].string); } break; case 507: #line 1369 "asmparse.y" { PASM->EmitInstrI(yypvt[-1].instr, yypvt[-0].int32); } break; case 508: #line 1370 "asmparse.y" { PASM->EmitInstrI8(yypvt[-1].instr, yypvt[-0].int64); } break; case 509: #line 1371 "asmparse.y" { PASM->EmitInstrR(yypvt[-1].instr, yypvt[-0].float64); delete (yypvt[-0].float64);} break; case 510: #line 1372 "asmparse.y" { double f = (double) (*yypvt[-0].int64); PASM->EmitInstrR(yypvt[-1].instr, &f); } break; case 511: #line 1373 "asmparse.y" { unsigned L = yypvt[-1].binstr->length(); FAIL_UNLESS(L >= sizeof(float), ("%d hexbytes, must be at least %d\n", L,sizeof(float))); if(L < sizeof(float)) {YYERROR; } else { double f = (L >= sizeof(double)) ? 
*((double *)(yypvt[-1].binstr->ptr())) : (double)(*(float *)(yypvt[-1].binstr->ptr())); PASM->EmitInstrR(yypvt[-2].instr,&f); } delete yypvt[-1].binstr; } break; case 512: #line 1382 "asmparse.y" { PASM->EmitInstrBrOffset(yypvt[-1].instr, yypvt[-0].int32); } break; case 513: #line 1383 "asmparse.y" { PASM->EmitInstrBrTarget(yypvt[-1].instr, yypvt[-0].string); } break; case 514: #line 1385 "asmparse.y" { PASM->SetMemberRefFixup(yypvt[-0].token,PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,yypvt[-0].token); PASM->m_tkCurrentCVOwner = yypvt[-0].token; PASM->m_pCustomDescrList = NULL; iCallConv = 0; } break; case 515: #line 1392 "asmparse.y" { yypvt[-3].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); mdToken mr = PASM->MakeMemberRef(yypvt[-2].token, yypvt[-0].string, yypvt[-3].binstr); PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-4].instr)); PASM->EmitInstrI(yypvt[-4].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 516: #line 1400 "asmparse.y" { yypvt[-1].binstr->insertInt8(IMAGE_CEE_CS_CALLCONV_FIELD); mdToken mr = PASM->MakeMemberRef(mdTokenNil, yypvt[-0].string, yypvt[-1].binstr); PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-2].instr)); PASM->EmitInstrI(yypvt[-2].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 517: #line 1407 "asmparse.y" { mdToken mr = yypvt[-0].token; PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 518: #line 1413 "asmparse.y" { mdToken mr = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 519: #line 1419 "asmparse.y" { mdToken mr = yypvt[-0].tdd->m_tkTypeSpec; PASM->SetMemberRefFixup(mr, PASM->OpcodeLen(yypvt[-1].instr)); PASM->EmitInstrI(yypvt[-1].instr,mr); PASM->m_tkCurrentCVOwner = mr; PASM->m_pCustomDescrList = NULL; } break; case 520: #line 1425 "asmparse.y" { PASM->EmitInstrI(yypvt[-1].instr, yypvt[-0].token); PASM->m_tkCurrentCVOwner = yypvt[-0].token; PASM->m_pCustomDescrList = NULL; } break; case 521: #line 1429 "asmparse.y" { PASM->EmitInstrStringLiteral(yypvt[-1].instr, yypvt[-0].binstr,TRUE); } break; case 522: #line 1431 "asmparse.y" { PASM->EmitInstrStringLiteral(yypvt[-4].instr, yypvt[-1].binstr,FALSE); } break; case 523: #line 1433 "asmparse.y" { PASM->EmitInstrStringLiteral(yypvt[-3].instr, yypvt[-1].binstr,FALSE,TRUE); } break; case 524: #line 1435 "asmparse.y" { PASM->EmitInstrSig(yypvt[-5].instr, parser->MakeSig(yypvt[-4].int32, yypvt[-3].binstr, yypvt[-1].binstr)); PASM->ResetArgNameList(); } break; case 525: #line 1439 "asmparse.y" { PASM->EmitInstrI(yypvt[-1].instr,yypvt[-0].token); PASM->m_tkCurrentCVOwner = yypvt[-0].token; PASM->m_pCustomDescrList = NULL; iOpcodeLen = 0; } break; case 526: #line 1444 "asmparse.y" { PASM->EmitInstrSwitch(yypvt[-3].instr, yypvt[-1].labels); } break; case 527: #line 1447 "asmparse.y" { yyval.labels = 0; } break; case 528: #line 1448 "asmparse.y" { yyval.labels = new Labels(yypvt[-2].string, yypvt[-0].labels, TRUE); } break; case 529: #line 1449 "asmparse.y" { yyval.labels = new Labels((char *)(UINT_PTR)yypvt[-2].int32, yypvt[-0].labels, FALSE); } break; case 530: #line 1450 "asmparse.y" { yyval.labels = new Labels(yypvt[-0].string, NULL, TRUE); } break; case 531: #line 1451 "asmparse.y" { yyval.labels = new 
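// [annotation] Token-bearing instructions, cases 514-519: each calls
// PASM->SetMemberRefFixup before EmitInstrI so the emitted member token can
// be patched once the referenced member is resolved; cases 521-523 emit
// ldstr string literals, case 524 builds the calli signature, and case 526
// emits the switch jump table from the label list parsed below.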
Labels((char *)(UINT_PTR)yypvt[-0].int32, NULL, FALSE); } break; case 532: #line 1455 "asmparse.y" { yyval.binstr = NULL; } break; case 533: #line 1456 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; } break; case 534: #line 1459 "asmparse.y" { yyval.binstr = NULL; } break; case 535: #line 1460 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 536: #line 1463 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 537: #line 1464 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 538: #line 1468 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 539: #line 1469 "asmparse.y" { yyval.binstr = yypvt[-0].binstr;} break; case 540: #line 1472 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 541: #line 1473 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 542: #line 1476 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_SENTINEL); } break; case 543: #line 1477 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-1].binstr); PASM->addArgName(NULL, yypvt[-1].binstr, yypvt[-0].binstr, yypvt[-2].int32); } break; case 544: #line 1478 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-2].binstr); PASM->addArgName(yypvt[-0].string, yypvt[-2].binstr, yypvt[-1].binstr, yypvt[-3].int32);} break; case 545: #line 1482 "asmparse.y" { yyval.token = PASM->ResolveClassRef(PASM->GetAsmRef(yypvt[-2].string), yypvt[-0].string, NULL); delete[] yypvt[-2].string;} break; case 546: #line 1483 "asmparse.y" { yyval.token = PASM->ResolveClassRef(yypvt[-2].token, yypvt[-0].string, NULL); } break; case 547: #line 1484 "asmparse.y" { yyval.token = PASM->ResolveClassRef(mdTokenNil, yypvt[-0].string, NULL); } break; case 548: #line 1485 "asmparse.y" { yyval.token = PASM->ResolveClassRef(PASM->GetModRef(yypvt[-2].string),yypvt[-0].string, NULL); delete[] yypvt[-2].string;} break; case 549: #line 1486 "asmparse.y" { yyval.token = PASM->ResolveClassRef(1,yypvt[-0].string,NULL); } break; case 550: #line 1487 "asmparse.y" { yyval.token = yypvt[-0].token; } break; case 551: #line 1488 "asmparse.y" { yyval.token = yypvt[-0].tdd->m_tkTypeSpec; } break; case 552: #line 1489 "asmparse.y" { if(PASM->m_pCurClass != NULL) yyval.token = PASM->m_pCurClass->m_cl; else { yyval.token = 0; PASM->report->error(".this outside class scope\n"); } } break; case 553: #line 1492 "asmparse.y" { if(PASM->m_pCurClass != NULL) { yyval.token = PASM->m_pCurClass->m_crExtends; if(RidFromToken(yyval.token) == 0) PASM->report->error(".base undefined\n"); } else { yyval.token = 0; PASM->report->error(".base outside class scope\n"); } } break; case 554: #line 1498 "asmparse.y" { if(PASM->m_pCurClass != NULL) { if(PASM->m_pCurClass->m_pEncloser != NULL) yyval.token = PASM->m_pCurClass->m_pEncloser->m_cl; else { yyval.token = 0; PASM->report->error(".nester undefined\n"); } } else { yyval.token = 0; PASM->report->error(".nester outside class scope\n"); } } break; case 555: #line 1505 "asmparse.y" { yyval.string = yypvt[-0].string; } break; case 556: #line 1506 "asmparse.y" { yyval.string = newStringWDel(yypvt[-2].string, NESTING_SEP, yypvt[-0].string); } break; case 557: #line 1509 "asmparse.y" { yyval.token = yypvt[-0].token;} break; case 558: #line 1510 "asmparse.y" { yyval.token = PASM->GetAsmRef(yypvt[-1].string); delete[] yypvt[-1].string;} break; case 559: #line 1511 "asmparse.y" { yyval.token = 
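// [annotation] Class-reference resolution, cases 545-554: a dotted name is
// resolved against an explicit [assembly] or [.module] scope when one is
// given and against the current scope otherwise, while .this, .base and
// .nester only make sense inside a class scope and report an error (with a
// zero token) anywhere else.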
PASM->GetModRef(yypvt[-1].string); delete[] yypvt[-1].string;} break; case 560: #line 1512 "asmparse.y" { yyval.token = PASM->ResolveTypeSpec(yypvt[-0].binstr); } break; case 561: #line 1516 "asmparse.y" { yyval.binstr = new BinStr(); } break; case 562: #line 1518 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_CUSTOMMARSHALER); corEmitInt(yyval.binstr,yypvt[-7].binstr->length()); yyval.binstr->append(yypvt[-7].binstr); corEmitInt(yyval.binstr,yypvt[-5].binstr->length()); yyval.binstr->append(yypvt[-5].binstr); corEmitInt(yyval.binstr,yypvt[-3].binstr->length()); yyval.binstr->append(yypvt[-3].binstr); corEmitInt(yyval.binstr,yypvt[-1].binstr->length()); yyval.binstr->append(yypvt[-1].binstr); PASM->report->warn("Deprecated 4-string form of custom marshaler, first two strings ignored\n");} break; case 563: #line 1525 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_CUSTOMMARSHALER); corEmitInt(yyval.binstr,0); corEmitInt(yyval.binstr,0); corEmitInt(yyval.binstr,yypvt[-3].binstr->length()); yyval.binstr->append(yypvt[-3].binstr); corEmitInt(yyval.binstr,yypvt[-1].binstr->length()); yyval.binstr->append(yypvt[-1].binstr); } break; case 564: #line 1530 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_FIXEDSYSSTRING); corEmitInt(yyval.binstr,yypvt[-1].int32); } break; case 565: #line 1533 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_FIXEDARRAY); corEmitInt(yyval.binstr,yypvt[-2].int32); yyval.binstr->append(yypvt[-0].binstr); } break; case 566: #line 1535 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_VARIANT); PASM->report->warn("Deprecated native type 'variant'\n"); } break; case 567: #line 1537 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_CURRENCY); } break; case 568: #line 1538 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_SYSCHAR); PASM->report->warn("Deprecated native type 'syschar'\n"); } break; case 569: #line 1540 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_VOID); PASM->report->warn("Deprecated native type 'void'\n"); } break; case 570: #line 1542 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_BOOLEAN); } break; case 571: #line 1543 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I1); } break; case 572: #line 1544 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I2); } break; case 573: #line 1545 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I4); } break; case 574: #line 1546 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_I8); } break; case 575: #line 1547 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_R4); } break; case 576: #line 1548 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_R8); } break; case 577: #line 1549 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_ERROR); } break; case 578: #line 1550 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U1); } break; case 579: #line 1551 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U2); } break; case 580: #line 1552 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U4); } break; case 581: #line 1553 
"asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U8); } break; case 582: #line 1554 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U1); } break; case 583: #line 1555 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U2); } break; case 584: #line 1556 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U4); } break; case 585: #line 1557 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_U8); } break; case 586: #line 1558 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(NATIVE_TYPE_PTR); PASM->report->warn("Deprecated native type '*'\n"); } break; case 587: #line 1560 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); } break; case 588: #line 1562 "asmparse.y" { yyval.binstr = yypvt[-3].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); corEmitInt(yyval.binstr,0); corEmitInt(yyval.binstr,yypvt[-1].int32); corEmitInt(yyval.binstr,0); } break; case 589: #line 1567 "asmparse.y" { yyval.binstr = yypvt[-5].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); corEmitInt(yyval.binstr,yypvt[-1].int32); corEmitInt(yyval.binstr,yypvt[-3].int32); corEmitInt(yyval.binstr,ntaSizeParamIndexSpecified); } break; case 590: #line 1572 "asmparse.y" { yyval.binstr = yypvt[-4].binstr; if(yyval.binstr->length()==0) yyval.binstr->appendInt8(NATIVE_TYPE_MAX); yyval.binstr->insertInt8(NATIVE_TYPE_ARRAY); corEmitInt(yyval.binstr,yypvt[-1].int32); } break; case 591: #line 1575 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_DECIMAL); PASM->report->warn("Deprecated native type 'decimal'\n"); } break; case 592: #line 1577 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_DATE); PASM->report->warn("Deprecated native type 'date'\n"); } break; case 593: #line 1579 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_BSTR); } break; case 594: #line 1580 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPSTR); } break; case 595: #line 1581 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPWSTR); } break; case 596: #line 1582 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPTSTR); } break; case 597: #line 1583 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_OBJECTREF); PASM->report->warn("Deprecated native type 'objectref'\n"); } break; case 598: #line 1585 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_IUNKNOWN); if(yypvt[-0].int32 != -1) corEmitInt(yyval.binstr,yypvt[-0].int32); } break; case 599: #line 1587 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_IDISPATCH); if(yypvt[-0].int32 != -1) corEmitInt(yyval.binstr,yypvt[-0].int32); } break; case 600: #line 1589 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_STRUCT); } break; case 601: #line 1590 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_INTF); if(yypvt[-0].int32 != -1) corEmitInt(yyval.binstr,yypvt[-0].int32); } break; case 602: #line 1592 "asmparse.y" { yyval.binstr = new 
BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_SAFEARRAY); corEmitInt(yyval.binstr,yypvt[-0].int32); corEmitInt(yyval.binstr,0);} break; case 603: #line 1595 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_SAFEARRAY); corEmitInt(yyval.binstr,yypvt[-2].int32); corEmitInt(yyval.binstr,yypvt[-0].binstr->length()); yyval.binstr->append(yypvt[-0].binstr); } break; case 604: #line 1599 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_INT); } break; case 605: #line 1600 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_UINT); } break; case 606: #line 1601 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_UINT); } break; case 607: #line 1602 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_NESTEDSTRUCT); PASM->report->warn("Deprecated native type 'nested struct'\n"); } break; case 608: #line 1604 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_BYVALSTR); } break; case 609: #line 1605 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_ANSIBSTR); } break; case 610: #line 1606 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_TBSTR); } break; case 611: #line 1607 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_VARIANTBOOL); } break; case 612: #line 1608 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_FUNC); } break; case 613: #line 1609 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_ASANY); } break; case 614: #line 1610 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(NATIVE_TYPE_LPSTRUCT); } break; case 615: #line 1611 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-0].tdd->m_pbsTypeSpec); } break; case 616: #line 1614 "asmparse.y" { yyval.int32 = -1; } break; case 617: #line 1615 "asmparse.y" { yyval.int32 = yypvt[-1].int32; } break; case 618: #line 1618 "asmparse.y" { yyval.int32 = VT_EMPTY; } break; case 619: #line 1619 "asmparse.y" { yyval.int32 = VT_NULL; } break; case 620: #line 1620 "asmparse.y" { yyval.int32 = VT_VARIANT; } break; case 621: #line 1621 "asmparse.y" { yyval.int32 = VT_CY; } break; case 622: #line 1622 "asmparse.y" { yyval.int32 = VT_VOID; } break; case 623: #line 1623 "asmparse.y" { yyval.int32 = VT_BOOL; } break; case 624: #line 1624 "asmparse.y" { yyval.int32 = VT_I1; } break; case 625: #line 1625 "asmparse.y" { yyval.int32 = VT_I2; } break; case 626: #line 1626 "asmparse.y" { yyval.int32 = VT_I4; } break; case 627: #line 1627 "asmparse.y" { yyval.int32 = VT_I8; } break; case 628: #line 1628 "asmparse.y" { yyval.int32 = VT_R4; } break; case 629: #line 1629 "asmparse.y" { yyval.int32 = VT_R8; } break; case 630: #line 1630 "asmparse.y" { yyval.int32 = VT_UI1; } break; case 631: #line 1631 "asmparse.y" { yyval.int32 = VT_UI2; } break; case 632: #line 1632 "asmparse.y" { yyval.int32 = VT_UI4; } break; case 633: #line 1633 "asmparse.y" { yyval.int32 = VT_UI8; } break; case 634: #line 1634 "asmparse.y" { yyval.int32 = VT_UI1; } break; case 635: #line 1635 "asmparse.y" { yyval.int32 = VT_UI2; } break; case 636: #line 1636 "asmparse.y" { yyval.int32 = VT_UI4; } break; case 637: #line 1637 "asmparse.y" { yyval.int32 = VT_UI8; } break; case 638: #line 1638 "asmparse.y" { yyval.int32 = VT_PTR; } break; case 639: #line 1639 "asmparse.y" { yyval.int32 = yypvt[-2].int32 | VT_ARRAY; } break; case 
640: #line 1640 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | VT_VECTOR; } break; case 641: #line 1641 "asmparse.y" { yyval.int32 = yypvt[-1].int32 | VT_BYREF; } break; case 642: #line 1642 "asmparse.y" { yyval.int32 = VT_DECIMAL; } break; case 643: #line 1643 "asmparse.y" { yyval.int32 = VT_DATE; } break; case 644: #line 1644 "asmparse.y" { yyval.int32 = VT_BSTR; } break; case 645: #line 1645 "asmparse.y" { yyval.int32 = VT_LPSTR; } break; case 646: #line 1646 "asmparse.y" { yyval.int32 = VT_LPWSTR; } break; case 647: #line 1647 "asmparse.y" { yyval.int32 = VT_UNKNOWN; } break; case 648: #line 1648 "asmparse.y" { yyval.int32 = VT_DISPATCH; } break; case 649: #line 1649 "asmparse.y" { yyval.int32 = VT_SAFEARRAY; } break; case 650: #line 1650 "asmparse.y" { yyval.int32 = VT_INT; } break; case 651: #line 1651 "asmparse.y" { yyval.int32 = VT_UINT; } break; case 652: #line 1652 "asmparse.y" { yyval.int32 = VT_UINT; } break; case 653: #line 1653 "asmparse.y" { yyval.int32 = VT_ERROR; } break; case 654: #line 1654 "asmparse.y" { yyval.int32 = VT_HRESULT; } break; case 655: #line 1655 "asmparse.y" { yyval.int32 = VT_CARRAY; } break; case 656: #line 1656 "asmparse.y" { yyval.int32 = VT_USERDEFINED; } break; case 657: #line 1657 "asmparse.y" { yyval.int32 = VT_RECORD; } break; case 658: #line 1658 "asmparse.y" { yyval.int32 = VT_FILETIME; } break; case 659: #line 1659 "asmparse.y" { yyval.int32 = VT_BLOB; } break; case 660: #line 1660 "asmparse.y" { yyval.int32 = VT_STREAM; } break; case 661: #line 1661 "asmparse.y" { yyval.int32 = VT_STORAGE; } break; case 662: #line 1662 "asmparse.y" { yyval.int32 = VT_STREAMED_OBJECT; } break; case 663: #line 1663 "asmparse.y" { yyval.int32 = VT_STORED_OBJECT; } break; case 664: #line 1664 "asmparse.y" { yyval.int32 = VT_BLOB_OBJECT; } break; case 665: #line 1665 "asmparse.y" { yyval.int32 = VT_CF; } break; case 666: #line 1666 "asmparse.y" { yyval.int32 = VT_CLSID; } break; case 667: #line 1670 "asmparse.y" { if(yypvt[-0].token == PASM->m_tkSysString) { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); } else if(yypvt[-0].token == PASM->m_tkSysObject) { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_OBJECT); } else yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_CLASS, yypvt[-0].token); } break; case 668: #line 1676 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_OBJECT); } break; case 669: #line 1677 "asmparse.y" { yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_VALUETYPE, yypvt[-0].token); } break; case 670: #line 1678 "asmparse.y" { yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_VALUETYPE, yypvt[-0].token); } break; case 671: #line 1679 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_SZARRAY); } break; case 672: #line 1680 "asmparse.y" { yyval.binstr = parser->MakeTypeArray(ELEMENT_TYPE_ARRAY, yypvt[-3].binstr, yypvt[-1].binstr); } break; case 673: #line 1681 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_BYREF); } break; case 674: #line 1682 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_PTR); } break; case 675: #line 1683 "asmparse.y" { yyval.binstr = yypvt[-1].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_PINNED); } break; case 676: #line 1684 "asmparse.y" { yyval.binstr = parser->MakeTypeClass(ELEMENT_TYPE_CMOD_REQD, yypvt[-1].token); yyval.binstr->append(yypvt[-4].binstr); } break; case 677: #line 1686 "asmparse.y" { yyval.binstr = 
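// [annotation] Managed type productions, cases 667-690: System.String and
// System.Object collapse to the ELEMENT_TYPE_STRING/OBJECT shorthands,
// generic instantiations get an ELEMENT_TYPE_GENERICINST prefix plus an
// argument count from corCountArgs, and !n / !!n produce ELEMENT_TYPE_VAR /
// ELEMENT_TYPE_MVAR (with the *FIXUP placeholders queued on TyParFixupList
// when a named parameter cannot be resolved yet).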
parser->MakeTypeClass(ELEMENT_TYPE_CMOD_OPT, yypvt[-1].token); yyval.binstr->append(yypvt[-4].binstr); } break; case 678: #line 1689 "asmparse.y" { yyval.binstr = parser->MakeSig(yypvt[-5].int32, yypvt[-4].binstr, yypvt[-1].binstr); yyval.binstr->insertInt8(ELEMENT_TYPE_FNPTR); PASM->delArgNameList(PASM->m_firstArgName); PASM->m_firstArgName = parser->m_ANSFirst.POP(); PASM->m_lastArgName = parser->m_ANSLast.POP(); } break; case 679: #line 1695 "asmparse.y" { if(yypvt[-1].binstr == NULL) yyval.binstr = yypvt[-3].binstr; else { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_GENERICINST); yyval.binstr->append(yypvt[-3].binstr); corEmitInt(yyval.binstr, corCountArgs(yypvt[-1].binstr)); yyval.binstr->append(yypvt[-1].binstr); delete yypvt[-3].binstr; delete yypvt[-1].binstr; }} break; case 680: #line 1702 "asmparse.y" { //if(PASM->m_pCurMethod) { // if(($3 < 0)||((DWORD)$3 >= PASM->m_pCurMethod->m_NumTyPars)) // PASM->report->error("Invalid method type parameter '%d'\n",$3); yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_MVAR); corEmitInt(yyval.binstr, yypvt[-0].int32); //} else PASM->report->error("Method type parameter '%d' outside method scope\n",$3); } break; case 681: #line 1708 "asmparse.y" { //if(PASM->m_pCurClass) { // if(($2 < 0)||((DWORD)$2 >= PASM->m_pCurClass->m_NumTyPars)) // PASM->report->error("Invalid type parameter '%d'\n",$2); yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_VAR); corEmitInt(yyval.binstr, yypvt[-0].int32); //} else PASM->report->error("Type parameter '%d' outside class scope\n",$2); } break; case 682: #line 1714 "asmparse.y" { int eltype = ELEMENT_TYPE_MVAR; int n=-1; if(PASM->m_pCurMethod) n = PASM->m_pCurMethod->FindTyPar(yypvt[-0].string); else { if(PASM->m_TyParList) n = PASM->m_TyParList->IndexOf(yypvt[-0].string); if(n == -1) { n = TyParFixupList.COUNT(); TyParFixupList.PUSH(yypvt[-0].string); eltype = ELEMENT_TYPE_MVARFIXUP; } } if(n == -1) { PASM->report->error("Invalid method type parameter '%s'\n",yypvt[-0].string); n = 0x1FFFFFFF; } yyval.binstr = new BinStr(); yyval.binstr->appendInt8(eltype); corEmitInt(yyval.binstr,n); } break; case 683: #line 1729 "asmparse.y" { int eltype = ELEMENT_TYPE_VAR; int n=-1; if(PASM->m_pCurClass && !newclass) n = PASM->m_pCurClass->FindTyPar(yypvt[-0].string); else { if(PASM->m_TyParList) n = PASM->m_TyParList->IndexOf(yypvt[-0].string); if(n == -1) { n = TyParFixupList.COUNT(); TyParFixupList.PUSH(yypvt[-0].string); eltype = ELEMENT_TYPE_VARFIXUP; } } if(n == -1) { PASM->report->error("Invalid type parameter '%s'\n",yypvt[-0].string); n = 0x1FFFFFFF; } yyval.binstr = new BinStr(); yyval.binstr->appendInt8(eltype); corEmitInt(yyval.binstr,n); } break; case 684: #line 1744 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_TYPEDBYREF); } break; case 685: #line 1745 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_VOID); } break; case 686: #line 1746 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I); } break; case 687: #line 1747 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U); } break; case 688: #line 1748 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U); } break; case 689: #line 1749 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 690: #line 1750 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; yyval.binstr->insertInt8(ELEMENT_TYPE_SENTINEL); } break; case 691: #line 1753 
"asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_CHAR); } break; case 692: #line 1754 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_STRING); } break; case 693: #line 1755 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_BOOLEAN); } break; case 694: #line 1756 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I1); } break; case 695: #line 1757 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I2); } break; case 696: #line 1758 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I4); } break; case 697: #line 1759 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_I8); } break; case 698: #line 1760 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R4); } break; case 699: #line 1761 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_R8); } break; case 700: #line 1762 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); } break; case 701: #line 1763 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); } break; case 702: #line 1764 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); } break; case 703: #line 1765 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); } break; case 704: #line 1766 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U1); } break; case 705: #line 1767 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U2); } break; case 706: #line 1768 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U4); } break; case 707: #line 1769 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(ELEMENT_TYPE_U8); } break; case 708: #line 1770 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->append(yypvt[-0].tdd->m_pbsTypeSpec); } break; case 709: #line 1773 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; } break; case 710: #line 1774 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yypvt[-2].binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; } break; case 711: #line 1777 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(0x7FFFFFFF); yyval.binstr->appendInt32(0x7FFFFFFF); } break; case 712: #line 1778 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(0x7FFFFFFF); yyval.binstr->appendInt32(0x7FFFFFFF); } break; case 713: #line 1779 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(0); yyval.binstr->appendInt32(yypvt[-0].int32); } break; case 714: #line 1780 "asmparse.y" { FAIL_UNLESS(yypvt[-2].int32 <= yypvt[-0].int32, ("lower bound %d must be <= upper bound %d\n", yypvt[-2].int32, yypvt[-0].int32)); if (yypvt[-2].int32 > yypvt[-0].int32) { YYERROR; }; yyval.binstr = new BinStr(); yyval.binstr->appendInt32(yypvt[-2].int32); yyval.binstr->appendInt32(yypvt[-0].int32-yypvt[-2].int32+1); } break; case 715: #line 1783 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt32(yypvt[-1].int32); yyval.binstr->appendInt32(0x7FFFFFFF); } break; case 716: #line 1788 "asmparse.y" { PASM->AddPermissionDecl(yypvt[-4].secAct, yypvt[-3].token, yypvt[-1].pair); } break; case 717: #line 1790 "asmparse.y" { PASM->AddPermissionDecl(yypvt[-5].secAct, yypvt[-4].token, yypvt[-1].binstr); } break; case 
718: #line 1791 "asmparse.y" { PASM->AddPermissionDecl(yypvt[-1].secAct, yypvt[-0].token, (NVPair *)NULL); } break; case 719: #line 1792 "asmparse.y" { PASM->AddPermissionSetDecl(yypvt[-2].secAct, yypvt[-1].binstr); } break; case 720: #line 1794 "asmparse.y" { PASM->AddPermissionSetDecl(yypvt[-1].secAct,BinStrToUnicode(yypvt[-0].binstr,true));} break; case 721: #line 1796 "asmparse.y" { BinStr* ret = new BinStr(); ret->insertInt8('.'); corEmitInt(ret, nSecAttrBlobs); ret->append(yypvt[-1].binstr); PASM->AddPermissionSetDecl(yypvt[-4].secAct,ret); nSecAttrBlobs = 0; } break; case 722: #line 1804 "asmparse.y" { yyval.binstr = new BinStr(); nSecAttrBlobs = 0;} break; case 723: #line 1805 "asmparse.y" { yyval.binstr = yypvt[-0].binstr; nSecAttrBlobs = 1; } break; case 724: #line 1806 "asmparse.y" { yyval.binstr = yypvt[-2].binstr; yyval.binstr->append(yypvt[-0].binstr); nSecAttrBlobs++; } break; case 725: #line 1810 "asmparse.y" { yyval.binstr = PASM->EncodeSecAttr(PASM->ReflectionNotation(yypvt[-4].token),yypvt[-1].binstr,nCustomBlobNVPairs); nCustomBlobNVPairs = 0; } break; case 726: #line 1813 "asmparse.y" { yyval.binstr = PASM->EncodeSecAttr(yypvt[-4].string,yypvt[-1].binstr,nCustomBlobNVPairs); nCustomBlobNVPairs = 0; } break; case 727: #line 1817 "asmparse.y" { yyval.secAct = yypvt[-2].secAct; bParsingByteArray = TRUE; } break; case 728: #line 1819 "asmparse.y" { yyval.secAct = yypvt[-2].secAct; bParsingByteArray = TRUE; } break; case 729: #line 1822 "asmparse.y" { yyval.pair = yypvt[-0].pair; } break; case 730: #line 1823 "asmparse.y" { yyval.pair = yypvt[-2].pair->Concat(yypvt[-0].pair); } break; case 731: #line 1826 "asmparse.y" { yypvt[-2].binstr->appendInt8(0); yyval.pair = new NVPair(yypvt[-2].binstr, yypvt[-0].binstr); } break; case 732: #line 1829 "asmparse.y" { yyval.int32 = 1; } break; case 733: #line 1830 "asmparse.y" { yyval.int32 = 0; } break; case 734: #line 1833 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_BOOLEAN); yyval.binstr->appendInt8(yypvt[-0].int32); } break; case 735: #line 1836 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_I4); yyval.binstr->appendInt32(yypvt[-0].int32); } break; case 736: #line 1839 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_I4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 737: #line 1842 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_STRING); yyval.binstr->append(yypvt[-0].binstr); delete yypvt[-0].binstr; yyval.binstr->appendInt8(0); } break; case 738: #line 1846 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-5].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); yyval.binstr->appendInt8(1); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 739: #line 1852 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-5].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); yyval.binstr->appendInt8(2); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 740: #line 1858 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-5].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); 
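// [annotation] Enum-typed named arguments, cases 738-741: the blob holds
// SERIALIZATION_TYPE_ENUM, the enum's reflection name, a width byte (1, 2
// or 4 per the int8/int16/int32 keyword), and the 32-bit value; case 741
// shows that width 4 is also the default when no keyword is given.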
yyval.binstr->appendInt8(4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 741: #line 1864 "asmparse.y" { yyval.binstr = new BinStr(); yyval.binstr->appendInt8(SERIALIZATION_TYPE_ENUM); char* sz = PASM->ReflectionNotation(yypvt[-3].token); strcpy_s((char *)yyval.binstr->getBuff((unsigned)strlen(sz) + 1), strlen(sz) + 1,sz); yyval.binstr->appendInt8(4); yyval.binstr->appendInt32(yypvt[-1].int32); } break; case 742: #line 1872 "asmparse.y" { yyval.secAct = dclRequest; } break; case 743: #line 1873 "asmparse.y" { yyval.secAct = dclDemand; } break; case 744: #line 1874 "asmparse.y" { yyval.secAct = dclAssert; } break; case 745: #line 1875 "asmparse.y" { yyval.secAct = dclDeny; } break; case 746: #line 1876 "asmparse.y" { yyval.secAct = dclPermitOnly; } break; case 747: #line 1877 "asmparse.y" { yyval.secAct = dclLinktimeCheck; } break; case 748: #line 1878 "asmparse.y" { yyval.secAct = dclInheritanceCheck; } break; case 749: #line 1879 "asmparse.y" { yyval.secAct = dclRequestMinimum; } break; case 750: #line 1880 "asmparse.y" { yyval.secAct = dclRequestOptional; } break; case 751: #line 1881 "asmparse.y" { yyval.secAct = dclRequestRefuse; } break; case 752: #line 1882 "asmparse.y" { yyval.secAct = dclPrejitGrant; } break; case 753: #line 1883 "asmparse.y" { yyval.secAct = dclPrejitDenied; } break; case 754: #line 1884 "asmparse.y" { yyval.secAct = dclNonCasDemand; } break; case 755: #line 1885 "asmparse.y" { yyval.secAct = dclNonCasLinkDemand; } break; case 756: #line 1886 "asmparse.y" { yyval.secAct = dclNonCasInheritance; } break; case 757: #line 1890 "asmparse.y" { PASM->ResetLineNumbers(); nCurrPC = PASM->m_CurPC; PENV->bExternSource = TRUE; PENV->bExternSourceAutoincrement = FALSE; } break; case 758: #line 1891 "asmparse.y" { PASM->ResetLineNumbers(); nCurrPC = PASM->m_CurPC; PENV->bExternSource = TRUE; PENV->bExternSourceAutoincrement = TRUE; } break; case 759: #line 1894 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-1].int32; PENV->nExtCol = 0; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].string);} break; case 760: #line 1897 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-0].int32; PENV->nExtCol = 0; PENV->nExtColEnd = static_cast<unsigned>(-1); } break; case 761: #line 1899 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-3].int32; PENV->nExtCol=yypvt[-1].int32; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].string);} break; case 762: #line 1902 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-2].int32; PENV->nExtCol=yypvt[-0].int32; PENV->nExtColEnd = static_cast<unsigned>(-1);} break; case 763: #line 1905 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-5].int32; PENV->nExtCol=yypvt[-3].int32; PENV->nExtColEnd = yypvt[-1].int32; PASM->SetSourceFileName(yypvt[-0].string);} break; case 764: #line 1909 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-4].int32; PENV->nExtCol=yypvt[-2].int32; PENV->nExtColEnd = yypvt[-0].int32; } break; case 765: #line 1912 "asmparse.y" { PENV->nExtLine = yypvt[-5].int32; PENV->nExtLineEnd = yypvt[-3].int32; PENV->nExtCol=yypvt[-1].int32; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].string);} break; case 766: #line 1916 "asmparse.y" { PENV->nExtLine = yypvt[-4].int32; PENV->nExtLineEnd = yypvt[-2].int32; PENV->nExtCol=yypvt[-0].int32; PENV->nExtColEnd = static_cast<unsigned>(-1); } break; case 767: #line 1919 "asmparse.y" { PENV->nExtLine = yypvt[-7].int32; PENV->nExtLineEnd = 
yypvt[-5].int32; PENV->nExtCol=yypvt[-3].int32; PENV->nExtColEnd = yypvt[-1].int32; PASM->SetSourceFileName(yypvt[-0].string);} break; case 768: #line 1923 "asmparse.y" { PENV->nExtLine = yypvt[-6].int32; PENV->nExtLineEnd = yypvt[-4].int32; PENV->nExtCol=yypvt[-2].int32; PENV->nExtColEnd = yypvt[-0].int32; } break; case 769: #line 1925 "asmparse.y" { PENV->nExtLine = PENV->nExtLineEnd = yypvt[-1].int32 - 1; PENV->nExtCol = 0; PENV->nExtColEnd = static_cast<unsigned>(-1); PASM->SetSourceFileName(yypvt[-0].binstr);} break; case 770: #line 1932 "asmparse.y" { PASMM->AddFile(yypvt[-5].string, yypvt[-6].fileAttr|yypvt[-4].fileAttr|yypvt[-0].fileAttr, yypvt[-2].binstr); } break; case 771: #line 1933 "asmparse.y" { PASMM->AddFile(yypvt[-1].string, yypvt[-2].fileAttr|yypvt[-0].fileAttr, NULL); } break; case 772: #line 1936 "asmparse.y" { yyval.fileAttr = (CorFileFlags) 0; } break; case 773: #line 1937 "asmparse.y" { yyval.fileAttr = (CorFileFlags) (yypvt[-1].fileAttr | ffContainsNoMetaData); } break; case 774: #line 1940 "asmparse.y" { yyval.fileAttr = (CorFileFlags) 0; } break; case 775: #line 1941 "asmparse.y" { yyval.fileAttr = (CorFileFlags) 0x80000000; } break; case 776: #line 1944 "asmparse.y" { bParsingByteArray = TRUE; } break; case 777: #line 1947 "asmparse.y" { PASMM->StartAssembly(yypvt[-0].string, NULL, (DWORD)yypvt[-1].asmAttr, FALSE); } break; case 778: #line 1950 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) 0; } break; case 779: #line 1951 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) (yypvt[-1].asmAttr | afRetargetable); } break; case 780: #line 1952 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) (yypvt[-1].asmAttr | afContentType_WindowsRuntime); } break; case 781: #line 1953 "asmparse.y" { yyval.asmAttr = (CorAssemblyFlags) (yypvt[-1].asmAttr | afPA_NoPlatform); } break; case 782: #line 1954 "asmparse.y" { yyval.asmAttr = yypvt[-2].asmAttr; } break; case 783: #line 1955 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_MSIL); } break; case 784: #line 1956 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_x86); } break; case 785: #line 1957 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_AMD64); } break; case 786: #line 1958 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_ARM); } break; case 787: #line 1959 "asmparse.y" { SET_PA(yyval.asmAttr,yypvt[-1].asmAttr,afPA_ARM64); } break; case 790: #line 1966 "asmparse.y" { PASMM->SetAssemblyHashAlg(yypvt[-0].int32); } break; case 793: #line 1971 "asmparse.y" { yyval.int32 = yypvt[-0].int32; } break; case 794: #line 1972 "asmparse.y" { yyval.int32 = 0xFFFF; } break; case 795: #line 1975 "asmparse.y" { PASMM->SetAssemblyPublicKey(yypvt[-1].binstr); } break; case 796: #line 1977 "asmparse.y" { PASMM->SetAssemblyVer((USHORT)yypvt[-6].int32, (USHORT)yypvt[-4].int32, (USHORT)yypvt[-2].int32, (USHORT)yypvt[-0].int32); } break; case 797: #line 1978 "asmparse.y" { yypvt[-0].binstr->appendInt8(0); PASMM->SetAssemblyLocale(yypvt[-0].binstr,TRUE); } break; case 798: #line 1979 "asmparse.y" { PASMM->SetAssemblyLocale(yypvt[-1].binstr,FALSE); } break; case 801: #line 1984 "asmparse.y" { bParsingByteArray = TRUE; } break; case 802: #line 1987 "asmparse.y" { bParsingByteArray = TRUE; } break; case 803: #line 1990 "asmparse.y" { bParsingByteArray = TRUE; } break; case 804: #line 1994 "asmparse.y" { PASMM->StartAssembly(yypvt[-0].string, NULL, yypvt[-1].asmAttr, TRUE); } break; case 805: #line 1996 "asmparse.y" { PASMM->StartAssembly(yypvt[-2].string, yypvt[-0].string, yypvt[-3].asmAttr, TRUE); } 
break; case 808: #line 2003 "asmparse.y" { PASMM->SetAssemblyHashBlob(yypvt[-1].binstr); } break; case 810: #line 2005 "asmparse.y" { PASMM->SetAssemblyPublicKeyToken(yypvt[-1].binstr); } break; case 811: #line 2006 "asmparse.y" { PASMM->SetAssemblyAutodetect(); } break; case 812: #line 2009 "asmparse.y" { PASMM->StartComType(yypvt[-0].string, yypvt[-1].exptAttr);} break; case 813: #line 2012 "asmparse.y" { PASMM->StartComType(yypvt[-0].string, yypvt[-1].exptAttr); } break; case 814: #line 2015 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) 0; } break; case 815: #line 2016 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-1].exptAttr | tdNotPublic); } break; case 816: #line 2017 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-1].exptAttr | tdPublic); } break; case 817: #line 2018 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-1].exptAttr | tdForwarder); } break; case 818: #line 2019 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedPublic); } break; case 819: #line 2020 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedPrivate); } break; case 820: #line 2021 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedFamily); } break; case 821: #line 2022 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedAssembly); } break; case 822: #line 2023 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedFamANDAssem); } break; case 823: #line 2024 "asmparse.y" { yyval.exptAttr = (CorTypeAttr) (yypvt[-2].exptAttr | tdNestedFamORAssem); } break; case 826: #line 2031 "asmparse.y" { PASMM->SetComTypeFile(yypvt[-0].string); } break; case 827: #line 2032 "asmparse.y" { PASMM->SetComTypeComType(yypvt[-0].string); } break; case 828: #line 2033 "asmparse.y" { PASMM->SetComTypeAsmRef(yypvt[-0].string); } break; case 829: #line 2034 "asmparse.y" { if(!PASMM->SetComTypeImplementationTok(yypvt[-1].int32)) PASM->report->error("Invalid implementation of exported type\n"); } break; case 830: #line 2036 "asmparse.y" { if(!PASMM->SetComTypeClassTok(yypvt[-0].int32)) PASM->report->error("Invalid TypeDefID of exported type\n"); } break; case 833: #line 2042 "asmparse.y" { PASMM->StartManifestRes(yypvt[-0].string, yypvt[-0].string, yypvt[-1].manresAttr); } break; case 834: #line 2044 "asmparse.y" { PASMM->StartManifestRes(yypvt[-2].string, yypvt[-0].string, yypvt[-3].manresAttr); } break; case 835: #line 2047 "asmparse.y" { yyval.manresAttr = (CorManifestResourceFlags) 0; } break; case 836: #line 2048 "asmparse.y" { yyval.manresAttr = (CorManifestResourceFlags) (yypvt[-1].manresAttr | mrPublic); } break; case 837: #line 2049 "asmparse.y" { yyval.manresAttr = (CorManifestResourceFlags) (yypvt[-1].manresAttr | mrPrivate); } break; case 840: #line 2056 "asmparse.y" { PASMM->SetManifestResFile(yypvt[-2].string, (ULONG)yypvt[-0].int32); } break; case 841: #line 2057 "asmparse.y" { PASMM->SetManifestResAsmRef(yypvt[-0].string); } break;/* End of actions */ #line 329 "F:\\NetFXDev1\\src\\tools\\devdiv\\amd64\\yypars.c" } } goto yystack; /* stack new state and value */ } #pragma warning(default:102) #ifdef YYDUMP YYLOCAL void YYNEAR YYPASCAL yydumpinfo(void) { short stackindex; short valindex; //dump yys printf("short yys[%d] {\n", YYMAXDEPTH); for (stackindex = 0; stackindex < YYMAXDEPTH; stackindex++){ if (stackindex) printf(", %s", stackindex % 10 ? 
"\0" : "\n"); printf("%6d", yys[stackindex]); } printf("\n};\n"); //dump yyv printf("YYSTYPE yyv[%d] {\n", YYMAXDEPTH); for (valindex = 0; valindex < YYMAXDEPTH; valindex++){ if (valindex) printf(", %s", valindex % 5 ? "\0" : "\n"); printf("%#*x", 3+sizeof(YYSTYPE), yyv[valindex]); } printf("\n};\n"); } #endif
-1
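The generated parser actions in the row above repeatedly append ELEMENT_TYPE_* byte codes (ELEMENT_TYPE_I4, ELEMENT_TYPE_STRING, ELEMENT_TYPE_R8, ...) to a BinStr, which is how ILASM builds ECMA-335 signature blobs one element type at a time. As a hedged illustration only (not part of the dataset row), here is a rough C# analogue using System.Reflection.Metadata's BlobBuilder; the SignatureTypeCode values deliberately mirror the ELEMENT_TYPE constants, and the wrapper class is hypothetical.

using System;
using System.Reflection.Metadata;

class ElementTypeBlobSketch
{
    static void Main()
    {
        // Mirrors the yacc actions: each primitive type keyword appends one
        // ELEMENT_TYPE_* byte to the signature blob being built.
        var blob = new BlobBuilder();
        blob.WriteByte((byte)SignatureTypeCode.Int32);   // ELEMENT_TYPE_I4 (0x08)
        blob.WriteByte((byte)SignatureTypeCode.String);  // ELEMENT_TYPE_STRING (0x0E)
        blob.WriteByte((byte)SignatureTypeCode.Double);  // ELEMENT_TYPE_R8 (0x0D)
        Console.WriteLine(BitConverter.ToString(blob.ToArray())); // 08-0E-0D
    }
}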
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
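The PR description above concerns split parameters on ARM32: with only r0-r3 available for arguments, a by-value struct can start in registers and spill onto the stack, and the early stack offset/size computed for such a parameter is what FEATURE_FASTTAILCALL uses to check for interference when placing fast-tailcall arguments. A hedged C# sketch of a signature that exercises this case; the names S16, Callee, Caller and SplitParamSketch are hypothetical, chosen only to make the register split concrete.

using System.Runtime.CompilerServices;

// 16-byte struct: passed by value on ARM32, it can be split so that the
// leading bytes land in r2/r3 and the remainder goes on the stack.
struct S16 { public int A, B, C, D; }

class SplitParamSketch
{
    [MethodImpl(MethodImplOptions.NoInlining)]
    static int Callee(int x, int y, S16 s) => x + y + s.A + s.D;

    // A call in tail position: with FEATURE_FASTTAILCALL the JIT needs the
    // early offset/size of 's' to decide whether placing the outgoing
    // argument would clobber the incoming one.
    static int Caller(int x, int y, S16 s) => Callee(x, y, s);

    static void Main() =>
        System.Console.WriteLine(Caller(1, 2, new S16 { A = 3, D = 4 }));
}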
./src/libraries/System.Net.HttpListener/tests/HttpListenerFactory.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Net.Sockets; using System.Runtime.InteropServices; using System.Text; namespace System.Net.Tests { // Utilities for generating URL prefixes for HttpListener public class HttpListenerFactory : IDisposable { const int StartPort = 1025; const int MaxStartAttempts = IPEndPoint.MaxPort - StartPort + 1; private static readonly object s_nextPortLock = new object(); private static int s_nextPort = StartPort; private readonly HttpListener _processPrefixListener; private readonly Exception _processPrefixException; private readonly string _processPrefix; private readonly string _hostname; private readonly string _path; private readonly int _port; internal HttpListenerFactory(string hostname = "localhost", string path = null) { // Find a URL prefix that is not in use on this machine *and* uses a port that's not in use. // Once we find this prefix, keep a listener on it for the duration of the process, so other processes // can't steal it. _hostname = hostname; _path = path ?? Guid.NewGuid().ToString("N"); string pathComponent = string.IsNullOrEmpty(_path) ? _path : $"{_path}/"; for (int attempt = 0; attempt < MaxStartAttempts; attempt++) { int port = GetNextPort(); string prefix = $"http://{hostname}:{port}/{pathComponent}"; var listener = new HttpListener(); try { listener.Prefixes.Add(prefix); listener.Start(); _processPrefixListener = listener; _processPrefix = prefix; _port = port; _processPrefixException = null; Socket socket = GetConnectedSocket(); socket.Close(); break; } catch (Exception e) { // can't use this prefix listener.Close(); // Remember the exception for later _processPrefixException = e; if (e is HttpListenerException listenerException) { // If we can't access the host (e.g. if it is '+' or '*' and the current user is the administrator) // then throw. const int ERROR_ACCESS_DENIED = 5; if (listenerException.ErrorCode == ERROR_ACCESS_DENIED && (hostname == "*" || hostname == "+")) { throw new InvalidOperationException($"Access denied for host {hostname}"); } } else if (!(e is SocketException)) { // If this is not an HttpListenerException or SocketException, something very wrong has happened, and there's no point // in trying again. break; } } } // At this point, either we've reserved a prefix, or we've tried everything and failed. If we failed, // we've saved the exception for later. We'll defer actually *throwing* the exception until a test // asks for the prefix, because dealing with a type initialization exception is not nice in xunit. } public int Port { get { if (_port == 0) { throw new Exception("Could not reserve a port for HttpListener", _processPrefixException); } return _port; } } public string ListeningUrl { get { if (_processPrefix == null) { throw new Exception("Could not reserve a port for HttpListener", _processPrefixException); } return _processPrefix; } } public string Hostname => _hostname; public string Path => _path; private static bool? s_supportsWildcards; public static bool SupportsWildcards { get { if (!s_supportsWildcards.HasValue) { try { using (new HttpListenerFactory("*")) { s_supportsWildcards = true; } } catch (InvalidOperationException) { s_supportsWildcards = false; } } return s_supportsWildcards.Value; } } public HttpListener GetListener() => _processPrefixListener ?? 
throw new Exception("Could not reserve a port for HttpListener", _processPrefixException); public void Dispose() => _processPrefixListener?.Close(); public Socket GetConnectedSocket() { if (_processPrefixException != null) { throw new Exception("Could not create HttpListener", _processPrefixException); } string hostname = _hostname == "*" || _hostname == "+" ? "localhost" : _hostname; // Some platforms or distributions require IPv6 sockets if the OS supports IPv6. Others (e.g. Ubuntu) don't. try { AddressFamily addressFamily = Socket.OSSupportsIPv6 ? AddressFamily.InterNetworkV6 : AddressFamily.InterNetwork; Socket socket = new Socket(addressFamily, SocketType.Stream, ProtocolType.Tcp); socket.Connect(hostname, Port); return socket; } catch { Socket socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); socket.Connect(hostname, Port); return socket; } } public byte[] GetContent(string httpVersion, string requestType, string query, string text, IEnumerable<string> headers, bool headerOnly) { headers = headers ?? Enumerable.Empty<string>(); Uri listeningUri = new Uri(ListeningUrl); string rawUrl = listeningUri.PathAndQuery; if (query != null) { rawUrl += query; } string content = $"{requestType} {rawUrl} HTTP/{httpVersion}\r\n"; if (!headers.Any(header => header.ToLower().StartsWith("host:"))) { content += $"Host: { listeningUri.Host}\r\n"; } if (text != null && !headers.Any(header => header.ToLower().StartsWith("content-length:"))) { content += $"Content-Length: {text.Length}\r\n"; } foreach (string header in headers) { content += header + "\r\n"; } content += "\r\n"; if (!headerOnly && text != null) { content += text; } return Encoding.UTF8.GetBytes(content); } public byte[] GetContent(string requestType, string text, bool headerOnly) { return GetContent("1.1", requestType, query: null, text: text, headers: null, headerOnly: headerOnly); } private static int GetNextPort() { lock (s_nextPortLock) { int port = s_nextPort++; if (s_nextPort > IPEndPoint.MaxPort) { s_nextPort = StartPort; } return port; } } } public static class RequestTypes { public const string POST = "POST"; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Net.Sockets; using System.Runtime.InteropServices; using System.Text; namespace System.Net.Tests { // Utilities for generating URL prefixes for HttpListener public class HttpListenerFactory : IDisposable { const int StartPort = 1025; const int MaxStartAttempts = IPEndPoint.MaxPort - StartPort + 1; private static readonly object s_nextPortLock = new object(); private static int s_nextPort = StartPort; private readonly HttpListener _processPrefixListener; private readonly Exception _processPrefixException; private readonly string _processPrefix; private readonly string _hostname; private readonly string _path; private readonly int _port; internal HttpListenerFactory(string hostname = "localhost", string path = null) { // Find a URL prefix that is not in use on this machine *and* uses a port that's not in use. // Once we find this prefix, keep a listener on it for the duration of the process, so other processes // can't steal it. _hostname = hostname; _path = path ?? Guid.NewGuid().ToString("N"); string pathComponent = string.IsNullOrEmpty(_path) ? _path : $"{_path}/"; for (int attempt = 0; attempt < MaxStartAttempts; attempt++) { int port = GetNextPort(); string prefix = $"http://{hostname}:{port}/{pathComponent}"; var listener = new HttpListener(); try { listener.Prefixes.Add(prefix); listener.Start(); _processPrefixListener = listener; _processPrefix = prefix; _port = port; _processPrefixException = null; Socket socket = GetConnectedSocket(); socket.Close(); break; } catch (Exception e) { // can't use this prefix listener.Close(); // Remember the exception for later _processPrefixException = e; if (e is HttpListenerException listenerException) { // If we can't access the host (e.g. if it is '+' or '*' and the current user is the administrator) // then throw. const int ERROR_ACCESS_DENIED = 5; if (listenerException.ErrorCode == ERROR_ACCESS_DENIED && (hostname == "*" || hostname == "+")) { throw new InvalidOperationException($"Access denied for host {hostname}"); } } else if (!(e is SocketException)) { // If this is not an HttpListenerException or SocketException, something very wrong has happened, and there's no point // in trying again. break; } } } // At this point, either we've reserved a prefix, or we've tried everything and failed. If we failed, // we've saved the exception for later. We'll defer actually *throwing* the exception until a test // asks for the prefix, because dealing with a type initialization exception is not nice in xunit. } public int Port { get { if (_port == 0) { throw new Exception("Could not reserve a port for HttpListener", _processPrefixException); } return _port; } } public string ListeningUrl { get { if (_processPrefix == null) { throw new Exception("Could not reserve a port for HttpListener", _processPrefixException); } return _processPrefix; } } public string Hostname => _hostname; public string Path => _path; private static bool? s_supportsWildcards; public static bool SupportsWildcards { get { if (!s_supportsWildcards.HasValue) { try { using (new HttpListenerFactory("*")) { s_supportsWildcards = true; } } catch (InvalidOperationException) { s_supportsWildcards = false; } } return s_supportsWildcards.Value; } } public HttpListener GetListener() => _processPrefixListener ?? 
throw new Exception("Could not reserve a port for HttpListener", _processPrefixException); public void Dispose() => _processPrefixListener?.Close(); public Socket GetConnectedSocket() { if (_processPrefixException != null) { throw new Exception("Could not create HttpListener", _processPrefixException); } string hostname = _hostname == "*" || _hostname == "+" ? "localhost" : _hostname; // Some platforms or distributions require IPv6 sockets if the OS supports IPv6. Others (e.g. Ubuntu) don't. try { AddressFamily addressFamily = Socket.OSSupportsIPv6 ? AddressFamily.InterNetworkV6 : AddressFamily.InterNetwork; Socket socket = new Socket(addressFamily, SocketType.Stream, ProtocolType.Tcp); socket.Connect(hostname, Port); return socket; } catch { Socket socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); socket.Connect(hostname, Port); return socket; } } public byte[] GetContent(string httpVersion, string requestType, string query, string text, IEnumerable<string> headers, bool headerOnly) { headers = headers ?? Enumerable.Empty<string>(); Uri listeningUri = new Uri(ListeningUrl); string rawUrl = listeningUri.PathAndQuery; if (query != null) { rawUrl += query; } string content = $"{requestType} {rawUrl} HTTP/{httpVersion}\r\n"; if (!headers.Any(header => header.ToLower().StartsWith("host:"))) { content += $"Host: { listeningUri.Host}\r\n"; } if (text != null && !headers.Any(header => header.ToLower().StartsWith("content-length:"))) { content += $"Content-Length: {text.Length}\r\n"; } foreach (string header in headers) { content += header + "\r\n"; } content += "\r\n"; if (!headerOnly && text != null) { content += text; } return Encoding.UTF8.GetBytes(content); } public byte[] GetContent(string requestType, string text, bool headerOnly) { return GetContent("1.1", requestType, query: null, text: text, headers: null, headerOnly: headerOnly); } private static int GetNextPort() { lock (s_nextPortLock) { int port = s_nextPort++; if (s_nextPort > IPEndPoint.MaxPort) { s_nextPort = StartPort; } return port; } } } public static class RequestTypes { public const string POST = "POST"; } }
-1
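The HttpListenerFactory in the row above reserves a URL prefix by starting an HttpListener on it and keeping that listener alive for the lifetime of the process, so other test processes cannot steal the port. A minimal standalone sketch of the same idea follows; the retry loop is kept, but the error classification and deferred-exception handling of the real factory are omitted.

using System;
using System.Net;

class PrefixReservationSketch
{
    static void Main()
    {
        // Try ports until one can be reserved; a started listener holds the
        // prefix so no other process can bind it.
        for (int port = 1025; port <= IPEndPoint.MaxPort; port++)
        {
            var listener = new HttpListener();
            listener.Prefixes.Add($"http://localhost:{port}/");
            try
            {
                listener.Start();
                Console.WriteLine($"Reserved http://localhost:{port}/");
                listener.Close(); // the real factory keeps it open until Dispose
                return;
            }
            catch (HttpListenerException)
            {
                listener.Close(); // prefix or port in use; try the next one
            }
        }
    }
}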
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/mono/mono/metadata/mono-perfcounters-def.h
/** * \file * Define the system and runtime performance counters. * Each category is defined with the macro: * PERFCTR_CAT(catid, name, help, type, instances, first_counter_id) * and after that follows the counters inside the category, defined by the macro: * PERFCTR_COUNTER(counter_id, name, help, type, field) * field is the field inside MonoPerfCounters per predefined counters. * Note we set it to unused for unrelated counters: it is unused * in those cases. */ PERFCTR_CAT(CPU, "Processor", "", MultiInstance, CPU, CPU_USER_TIME) PERFCTR_COUNTER(CPU_USER_TIME, "% User Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_INTR_TIME, "% Interrupt Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_DCP_TIME, "% DCP Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_PROC_TIME, "% Processor Time", "", Timer100NsInverse, unused) PERFCTR_CAT(PROC, "Process", "", MultiInstance, Process, PROC_USER_TIME) PERFCTR_COUNTER(PROC_USER_TIME, "% User Time", "", Timer100Ns, unused) PERFCTR_COUNTER(PROC_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused) PERFCTR_COUNTER(PROC_PROC_TIME, "% Processor Time", "", Timer100Ns, unused) PERFCTR_COUNTER(PROC_THREADS, "Thread Count", "", NumberOfItems64, unused) PERFCTR_COUNTER(PROC_VBYTES, "Virtual Bytes", "", NumberOfItems64, unused) PERFCTR_COUNTER(PROC_WSET, "Working Set", "", NumberOfItems64, unused) PERFCTR_COUNTER(PROC_PBYTES, "Private Bytes", "", NumberOfItems64, unused) /* sample runtime counter */ PERFCTR_CAT(MONO_MEM, "Mono Memory", "", SingleInstance, Mono, MEM_NUM_OBJECTS) PERFCTR_COUNTER(MEM_NUM_OBJECTS, "Allocated Objects", "", NumberOfItems64, unused) PERFCTR_COUNTER(MEM_PHYS_TOTAL, "Total Physical Memory", "Physical memory installed in the machine, in bytes", NumberOfItems64, unused) PERFCTR_COUNTER(MEM_PHYS_AVAILABLE, "Available Physical Memory", "Physical memory available in the machine, in bytes", NumberOfItems64, unused) PERFCTR_CAT(ASPNET, "ASP.NET", "", MultiInstance, Mono, ASPNET_REQ_Q) PERFCTR_COUNTER(ASPNET_REQ_Q, "Requests Queued", "", NumberOfItems64, aspnet_requests_queued) PERFCTR_COUNTER(ASPNET_REQ_TOTAL, "Requests Total", "", NumberOfItems32, aspnet_requests) PERFCTR_COUNTER(ASPNET_REQ_PSEC, "Requests/Sec", "", RateOfCountsPerSecond32, aspnet_requests) PERFCTR_CAT(JIT, ".NET CLR JIT", "", MultiInstance, Mono, JIT_BYTES) PERFCTR_COUNTER(JIT_BYTES, "# of IL Bytes JITted", "", NumberOfItems32, jit_bytes) PERFCTR_COUNTER(JIT_METHODS, "# of IL Methods JITted", "", NumberOfItems32, jit_methods) PERFCTR_COUNTER(JIT_TIME, "% Time in JIT", "", RawFraction, jit_time) PERFCTR_COUNTER(JIT_BYTES_PSEC, "IL Bytes Jitted/Sec", "", RateOfCountsPerSecond32, jit_bytes) PERFCTR_COUNTER(JIT_FAILURES, "Standard Jit Failures", "", NumberOfItems32, jit_failures) PERFCTR_CAT(EXC, ".NET CLR Exceptions", "", MultiInstance, Mono, EXC_THROWN) PERFCTR_COUNTER(EXC_THROWN, "# of Exceps Thrown", "", NumberOfItems32, exceptions_thrown) PERFCTR_COUNTER(EXC_THROWN_PSEC, "# of Exceps Thrown/Sec", "", RateOfCountsPerSecond32, exceptions_thrown) PERFCTR_COUNTER(EXC_FILTERS_PSEC, "# of Filters/Sec", "", RateOfCountsPerSecond32, exceptions_filters) PERFCTR_COUNTER(EXC_FINALLYS_PSEC, "# of Finallys/Sec", "", RateOfCountsPerSecond32, exceptions_finallys) PERFCTR_COUNTER(EXC_CATCH_DEPTH, "Throw to Catch Depth/Sec", "", NumberOfItems32, exceptions_depth) PERFCTR_CAT(GC, ".NET CLR Memory", "", MultiInstance, Mono, GC_GEN0) PERFCTR_COUNTER(GC_GEN0, "# Gen 0 Collections", "", NumberOfItems32, 
gc_collections0) PERFCTR_COUNTER(GC_GEN1, "# Gen 1 Collections", "", NumberOfItems32, gc_collections1) PERFCTR_COUNTER(GC_GEN2, "# Gen 2 Collections", "", NumberOfItems32, gc_collections2) PERFCTR_COUNTER(GC_PROM0, "Promoted Memory from Gen 0", "", NumberOfItems32, gc_promotions0) PERFCTR_COUNTER(GC_PROM1, "Promoted Memory from Gen 1", "", NumberOfItems32, gc_promotions1) PERFCTR_COUNTER(GC_PROM0SEC, "Gen 0 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions0) PERFCTR_COUNTER(GC_PROM1SEC, "Gen 1 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions1) PERFCTR_COUNTER(GC_PROMFIN, "Promoted Finalization-Memory from Gen 0", "", NumberOfItems32, gc_promotion_finalizers) PERFCTR_COUNTER(GC_GEN0SIZE, "Gen 0 heap size", "", NumberOfItems64, gc_gen0size) PERFCTR_COUNTER(GC_GEN1SIZE, "Gen 1 heap size", "", NumberOfItems64, gc_gen1size) PERFCTR_COUNTER(GC_GEN2SIZE, "Gen 2 heap size", "", NumberOfItems64, gc_gen2size) PERFCTR_COUNTER(GC_LOSIZE, "Large Object Heap size", "", NumberOfItems32, gc_lossize) PERFCTR_COUNTER(GC_FINSURV, "Finalization Survivors", "", NumberOfItems32, gc_fin_survivors) PERFCTR_COUNTER(GC_NHANDLES, "# GC Handles", "", NumberOfItems32, gc_num_handles) PERFCTR_COUNTER(GC_BYTESSEC, "Allocated Bytes/sec", "", RateOfCountsPerSecond32, gc_allocated) PERFCTR_COUNTER(GC_INDGC, "# Induced GC", "", NumberOfItems32, gc_induced) PERFCTR_COUNTER(GC_PERCTIME, "% Time in GC", "", RawFraction, gc_time) PERFCTR_COUNTER(GC_BYTES, "# Bytes in all Heaps", "", NumberOfItems64, gc_total_bytes) PERFCTR_COUNTER(GC_COMMBYTES, "# Total committed Bytes", "", NumberOfItems64, gc_committed_bytes) PERFCTR_COUNTER(GC_RESBYTES, "# Total reserved Bytes", "", NumberOfItems64, gc_reserved_bytes) PERFCTR_COUNTER(GC_PINNED, "# of Pinned Objects", "", NumberOfItems32, gc_num_pinned) PERFCTR_COUNTER(GC_SYNKB, "# of Sink Blocks in use", "", NumberOfItems32, gc_sync_blocks) PERFCTR_CAT(LOADING, ".NET CLR Loading", "", MultiInstance, Mono, LOADING_CLASSES) PERFCTR_COUNTER(LOADING_CLASSES, "Current Classes Loaded", "", NumberOfItems32, loader_classes) PERFCTR_COUNTER(LOADING_TOTCLASSES, "Total Classes Loaded", "", NumberOfItems32, loader_total_classes) PERFCTR_COUNTER(LOADING_CLASSESSEC, "Rate of Classes Loaded", "", RateOfCountsPerSecond32, loader_total_classes) PERFCTR_COUNTER(LOADING_APPDOMAINS, "Current appdomains", "", NumberOfItems32, loader_appdomains) PERFCTR_COUNTER(LOADING_TOTAPPDOMAINS, "Total Appdomains", "", NumberOfItems32, loader_total_appdomains) PERFCTR_COUNTER(LOADING_APPDOMAINSEC, "Rate of appdomains", "", RateOfCountsPerSecond32, loader_total_appdomains) PERFCTR_COUNTER(LOADING_ASSEMBLIES, "Current Assemblies", "", NumberOfItems32, loader_assemblies) PERFCTR_COUNTER(LOADING_TOTASSEMBLIES, "Total Assemblies", "", NumberOfItems32, loader_total_assemblies) PERFCTR_COUNTER(LOADING_ASSEMBLIESEC, "Rate of Assemblies", "", RateOfCountsPerSecond32, loader_total_assemblies) PERFCTR_COUNTER(LOADING_FAILURES, "Total # of Load Failures", "", NumberOfItems32, loader_failures) PERFCTR_COUNTER(LOADING_FAILURESSEC, "Rate of Load Failures", "", RateOfCountsPerSecond32, loader_failures) PERFCTR_COUNTER(LOADING_BYTES, "Bytes in Loader Heap", "", NumberOfItems32, loader_bytes) PERFCTR_COUNTER(LOADING_APPUNLOADED, "Total appdomains unloaded", "", NumberOfItems32, loader_appdomains_uloaded) PERFCTR_COUNTER(LOADING_APPUNLOADEDSEC, "Rate of appdomains unloaded", "", RateOfCountsPerSecond32, loader_appdomains_uloaded) PERFCTR_CAT(THREAD, ".NET CLR LocksAndThreads", "", MultiInstance, Mono, 
THREAD_CONTENTIONS) PERFCTR_COUNTER(THREAD_CONTENTIONS, "Total # of Contentions", "", NumberOfItems32, thread_contentions) PERFCTR_COUNTER(THREAD_CONTENTIONSSEC, "Contention Rate / sec", "", RateOfCountsPerSecond32, thread_contentions) PERFCTR_COUNTER(THREAD_QUEUELEN, "Current Queue Length", "", NumberOfItems32, thread_queue_len) PERFCTR_COUNTER(THREAD_QUEUELENP, "Queue Length Peak", "", NumberOfItems32, thread_queue_max) PERFCTR_COUNTER(THREAD_QUEUELENSEC, "Queue Length / sec", "", RateOfCountsPerSecond32, thread_queue_max) PERFCTR_COUNTER(THREAD_NUMLOG, "# of current logical Threads", "", NumberOfItems32, thread_num_logical) PERFCTR_COUNTER(THREAD_NUMPHYS, "# of current physical Threads", "", NumberOfItems32, thread_num_physical) PERFCTR_COUNTER(THREAD_NUMREC, "# of current recognized threads", "", NumberOfItems32, thread_cur_recognized) PERFCTR_COUNTER(THREAD_TOTREC, "# of total recognized threads", "", NumberOfItems32, thread_num_recognized) PERFCTR_COUNTER(THREAD_TOTRECSEC, "rate of recognized threads / sec", "", RateOfCountsPerSecond32, thread_num_recognized) PERFCTR_CAT(INTEROP, ".NET CLR Interop", "", MultiInstance, Mono, INTEROP_NUMCCW) PERFCTR_COUNTER(INTEROP_NUMCCW, "# of CCWs", "", NumberOfItems32, interop_num_ccw) PERFCTR_COUNTER(INTEROP_STUBS, "# of Stubs", "", NumberOfItems32, interop_num_stubs) PERFCTR_COUNTER(INTEROP_MARSH, "# of marshalling", "", NumberOfItems32, interop_num_marshals) PERFCTR_CAT(SECURITY, ".NET CLR Security", "", MultiInstance, Mono, SECURITY_CHECKS) PERFCTR_COUNTER(SECURITY_CHECKS, "Total Runtime Checks", "", NumberOfItems32, security_num_checks) PERFCTR_COUNTER(SECURITY_LCHECKS, "# Link Time Checks", "", NumberOfItems32, security_num_link_checks) PERFCTR_COUNTER(SECURITY_PERCTIME, "% Time in RT checks", "", RawFraction, security_time) PERFCTR_COUNTER(SECURITY_SWDEPTH, "Stack Walk Depth", "", NumberOfItems32, security_depth) PERFCTR_CAT(THREADPOOL, "Mono Threadpool", "", MultiInstance, Mono, THREADPOOL_WORKITEMS) PERFCTR_COUNTER(THREADPOOL_WORKITEMS, "Work Items Added", "", NumberOfItems64, threadpool_workitems) PERFCTR_COUNTER(THREADPOOL_WORKITEMS_PSEC, "Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_workitems) PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS, "IO Work Items Added", "", NumberOfItems64, threadpool_ioworkitems) PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS_PSEC, "IO Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_ioworkitems) PERFCTR_COUNTER(THREADPOOL_THREADS, "# of Threads", "", NumberOfItems32, threadpool_threads) PERFCTR_COUNTER(THREADPOOL_IOTHREADS, "# of IO Threads", "", NumberOfItems32, threadpool_iothreads) PERFCTR_CAT(NETWORK, "Network Interface", "", MultiInstance, NetworkInterface, NETWORK_BYTESRECSEC) PERFCTR_COUNTER(NETWORK_BYTESRECSEC, "Bytes Received/sec", "", RateOfCountsPerSecond64, unused) PERFCTR_COUNTER(NETWORK_BYTESSENTSEC, "Bytes Sent/sec", "", RateOfCountsPerSecond64, unused) PERFCTR_COUNTER(NETWORK_BYTESTOTALSEC, "Bytes Total/sec", "", RateOfCountsPerSecond64, unused)
/** * \file * Define the system and runtime performance counters. * Each category is defined with the macro: * PERFCTR_CAT(catid, name, help, type, instances, first_counter_id) * and after that follows the counters inside the category, defined by the macro: * PERFCTR_COUNTER(counter_id, name, help, type, field) * field is the field inside MonoPerfCounters per predefined counters. * Note we set it to unused for unrelated counters: it is unused * in those cases. */ PERFCTR_CAT(CPU, "Processor", "", MultiInstance, CPU, CPU_USER_TIME) PERFCTR_COUNTER(CPU_USER_TIME, "% User Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_INTR_TIME, "% Interrupt Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_DCP_TIME, "% DCP Time", "", Timer100Ns, unused) PERFCTR_COUNTER(CPU_PROC_TIME, "% Processor Time", "", Timer100NsInverse, unused) PERFCTR_CAT(PROC, "Process", "", MultiInstance, Process, PROC_USER_TIME) PERFCTR_COUNTER(PROC_USER_TIME, "% User Time", "", Timer100Ns, unused) PERFCTR_COUNTER(PROC_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused) PERFCTR_COUNTER(PROC_PROC_TIME, "% Processor Time", "", Timer100Ns, unused) PERFCTR_COUNTER(PROC_THREADS, "Thread Count", "", NumberOfItems64, unused) PERFCTR_COUNTER(PROC_VBYTES, "Virtual Bytes", "", NumberOfItems64, unused) PERFCTR_COUNTER(PROC_WSET, "Working Set", "", NumberOfItems64, unused) PERFCTR_COUNTER(PROC_PBYTES, "Private Bytes", "", NumberOfItems64, unused) /* sample runtime counter */ PERFCTR_CAT(MONO_MEM, "Mono Memory", "", SingleInstance, Mono, MEM_NUM_OBJECTS) PERFCTR_COUNTER(MEM_NUM_OBJECTS, "Allocated Objects", "", NumberOfItems64, unused) PERFCTR_COUNTER(MEM_PHYS_TOTAL, "Total Physical Memory", "Physical memory installed in the machine, in bytes", NumberOfItems64, unused) PERFCTR_COUNTER(MEM_PHYS_AVAILABLE, "Available Physical Memory", "Physical memory available in the machine, in bytes", NumberOfItems64, unused) PERFCTR_CAT(ASPNET, "ASP.NET", "", MultiInstance, Mono, ASPNET_REQ_Q) PERFCTR_COUNTER(ASPNET_REQ_Q, "Requests Queued", "", NumberOfItems64, aspnet_requests_queued) PERFCTR_COUNTER(ASPNET_REQ_TOTAL, "Requests Total", "", NumberOfItems32, aspnet_requests) PERFCTR_COUNTER(ASPNET_REQ_PSEC, "Requests/Sec", "", RateOfCountsPerSecond32, aspnet_requests) PERFCTR_CAT(JIT, ".NET CLR JIT", "", MultiInstance, Mono, JIT_BYTES) PERFCTR_COUNTER(JIT_BYTES, "# of IL Bytes JITted", "", NumberOfItems32, jit_bytes) PERFCTR_COUNTER(JIT_METHODS, "# of IL Methods JITted", "", NumberOfItems32, jit_methods) PERFCTR_COUNTER(JIT_TIME, "% Time in JIT", "", RawFraction, jit_time) PERFCTR_COUNTER(JIT_BYTES_PSEC, "IL Bytes Jitted/Sec", "", RateOfCountsPerSecond32, jit_bytes) PERFCTR_COUNTER(JIT_FAILURES, "Standard Jit Failures", "", NumberOfItems32, jit_failures) PERFCTR_CAT(EXC, ".NET CLR Exceptions", "", MultiInstance, Mono, EXC_THROWN) PERFCTR_COUNTER(EXC_THROWN, "# of Exceps Thrown", "", NumberOfItems32, exceptions_thrown) PERFCTR_COUNTER(EXC_THROWN_PSEC, "# of Exceps Thrown/Sec", "", RateOfCountsPerSecond32, exceptions_thrown) PERFCTR_COUNTER(EXC_FILTERS_PSEC, "# of Filters/Sec", "", RateOfCountsPerSecond32, exceptions_filters) PERFCTR_COUNTER(EXC_FINALLYS_PSEC, "# of Finallys/Sec", "", RateOfCountsPerSecond32, exceptions_finallys) PERFCTR_COUNTER(EXC_CATCH_DEPTH, "Throw to Catch Depth/Sec", "", NumberOfItems32, exceptions_depth) PERFCTR_CAT(GC, ".NET CLR Memory", "", MultiInstance, Mono, GC_GEN0) PERFCTR_COUNTER(GC_GEN0, "# Gen 0 Collections", "", NumberOfItems32, 
gc_collections0) PERFCTR_COUNTER(GC_GEN1, "# Gen 1 Collections", "", NumberOfItems32, gc_collections1) PERFCTR_COUNTER(GC_GEN2, "# Gen 2 Collections", "", NumberOfItems32, gc_collections2) PERFCTR_COUNTER(GC_PROM0, "Promoted Memory from Gen 0", "", NumberOfItems32, gc_promotions0) PERFCTR_COUNTER(GC_PROM1, "Promoted Memory from Gen 1", "", NumberOfItems32, gc_promotions1) PERFCTR_COUNTER(GC_PROM0SEC, "Gen 0 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions0) PERFCTR_COUNTER(GC_PROM1SEC, "Gen 1 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions1) PERFCTR_COUNTER(GC_PROMFIN, "Promoted Finalization-Memory from Gen 0", "", NumberOfItems32, gc_promotion_finalizers) PERFCTR_COUNTER(GC_GEN0SIZE, "Gen 0 heap size", "", NumberOfItems64, gc_gen0size) PERFCTR_COUNTER(GC_GEN1SIZE, "Gen 1 heap size", "", NumberOfItems64, gc_gen1size) PERFCTR_COUNTER(GC_GEN2SIZE, "Gen 2 heap size", "", NumberOfItems64, gc_gen2size) PERFCTR_COUNTER(GC_LOSIZE, "Large Object Heap size", "", NumberOfItems32, gc_lossize) PERFCTR_COUNTER(GC_FINSURV, "Finalization Survivors", "", NumberOfItems32, gc_fin_survivors) PERFCTR_COUNTER(GC_NHANDLES, "# GC Handles", "", NumberOfItems32, gc_num_handles) PERFCTR_COUNTER(GC_BYTESSEC, "Allocated Bytes/sec", "", RateOfCountsPerSecond32, gc_allocated) PERFCTR_COUNTER(GC_INDGC, "# Induced GC", "", NumberOfItems32, gc_induced) PERFCTR_COUNTER(GC_PERCTIME, "% Time in GC", "", RawFraction, gc_time) PERFCTR_COUNTER(GC_BYTES, "# Bytes in all Heaps", "", NumberOfItems64, gc_total_bytes) PERFCTR_COUNTER(GC_COMMBYTES, "# Total committed Bytes", "", NumberOfItems64, gc_committed_bytes) PERFCTR_COUNTER(GC_RESBYTES, "# Total reserved Bytes", "", NumberOfItems64, gc_reserved_bytes) PERFCTR_COUNTER(GC_PINNED, "# of Pinned Objects", "", NumberOfItems32, gc_num_pinned) PERFCTR_COUNTER(GC_SYNKB, "# of Sink Blocks in use", "", NumberOfItems32, gc_sync_blocks) PERFCTR_CAT(LOADING, ".NET CLR Loading", "", MultiInstance, Mono, LOADING_CLASSES) PERFCTR_COUNTER(LOADING_CLASSES, "Current Classes Loaded", "", NumberOfItems32, loader_classes) PERFCTR_COUNTER(LOADING_TOTCLASSES, "Total Classes Loaded", "", NumberOfItems32, loader_total_classes) PERFCTR_COUNTER(LOADING_CLASSESSEC, "Rate of Classes Loaded", "", RateOfCountsPerSecond32, loader_total_classes) PERFCTR_COUNTER(LOADING_APPDOMAINS, "Current appdomains", "", NumberOfItems32, loader_appdomains) PERFCTR_COUNTER(LOADING_TOTAPPDOMAINS, "Total Appdomains", "", NumberOfItems32, loader_total_appdomains) PERFCTR_COUNTER(LOADING_APPDOMAINSEC, "Rate of appdomains", "", RateOfCountsPerSecond32, loader_total_appdomains) PERFCTR_COUNTER(LOADING_ASSEMBLIES, "Current Assemblies", "", NumberOfItems32, loader_assemblies) PERFCTR_COUNTER(LOADING_TOTASSEMBLIES, "Total Assemblies", "", NumberOfItems32, loader_total_assemblies) PERFCTR_COUNTER(LOADING_ASSEMBLIESEC, "Rate of Assemblies", "", RateOfCountsPerSecond32, loader_total_assemblies) PERFCTR_COUNTER(LOADING_FAILURES, "Total # of Load Failures", "", NumberOfItems32, loader_failures) PERFCTR_COUNTER(LOADING_FAILURESSEC, "Rate of Load Failures", "", RateOfCountsPerSecond32, loader_failures) PERFCTR_COUNTER(LOADING_BYTES, "Bytes in Loader Heap", "", NumberOfItems32, loader_bytes) PERFCTR_COUNTER(LOADING_APPUNLOADED, "Total appdomains unloaded", "", NumberOfItems32, loader_appdomains_uloaded) PERFCTR_COUNTER(LOADING_APPUNLOADEDSEC, "Rate of appdomains unloaded", "", RateOfCountsPerSecond32, loader_appdomains_uloaded) PERFCTR_CAT(THREAD, ".NET CLR LocksAndThreads", "", MultiInstance, Mono, 
THREAD_CONTENTIONS) PERFCTR_COUNTER(THREAD_CONTENTIONS, "Total # of Contentions", "", NumberOfItems32, thread_contentions) PERFCTR_COUNTER(THREAD_CONTENTIONSSEC, "Contention Rate / sec", "", RateOfCountsPerSecond32, thread_contentions) PERFCTR_COUNTER(THREAD_QUEUELEN, "Current Queue Length", "", NumberOfItems32, thread_queue_len) PERFCTR_COUNTER(THREAD_QUEUELENP, "Queue Length Peak", "", NumberOfItems32, thread_queue_max) PERFCTR_COUNTER(THREAD_QUEUELENSEC, "Queue Length / sec", "", RateOfCountsPerSecond32, thread_queue_max) PERFCTR_COUNTER(THREAD_NUMLOG, "# of current logical Threads", "", NumberOfItems32, thread_num_logical) PERFCTR_COUNTER(THREAD_NUMPHYS, "# of current physical Threads", "", NumberOfItems32, thread_num_physical) PERFCTR_COUNTER(THREAD_NUMREC, "# of current recognized threads", "", NumberOfItems32, thread_cur_recognized) PERFCTR_COUNTER(THREAD_TOTREC, "# of total recognized threads", "", NumberOfItems32, thread_num_recognized) PERFCTR_COUNTER(THREAD_TOTRECSEC, "rate of recognized threads / sec", "", RateOfCountsPerSecond32, thread_num_recognized) PERFCTR_CAT(INTEROP, ".NET CLR Interop", "", MultiInstance, Mono, INTEROP_NUMCCW) PERFCTR_COUNTER(INTEROP_NUMCCW, "# of CCWs", "", NumberOfItems32, interop_num_ccw) PERFCTR_COUNTER(INTEROP_STUBS, "# of Stubs", "", NumberOfItems32, interop_num_stubs) PERFCTR_COUNTER(INTEROP_MARSH, "# of marshalling", "", NumberOfItems32, interop_num_marshals) PERFCTR_CAT(SECURITY, ".NET CLR Security", "", MultiInstance, Mono, SECURITY_CHECKS) PERFCTR_COUNTER(SECURITY_CHECKS, "Total Runtime Checks", "", NumberOfItems32, security_num_checks) PERFCTR_COUNTER(SECURITY_LCHECKS, "# Link Time Checks", "", NumberOfItems32, security_num_link_checks) PERFCTR_COUNTER(SECURITY_PERCTIME, "% Time in RT checks", "", RawFraction, security_time) PERFCTR_COUNTER(SECURITY_SWDEPTH, "Stack Walk Depth", "", NumberOfItems32, security_depth) PERFCTR_CAT(THREADPOOL, "Mono Threadpool", "", MultiInstance, Mono, THREADPOOL_WORKITEMS) PERFCTR_COUNTER(THREADPOOL_WORKITEMS, "Work Items Added", "", NumberOfItems64, threadpool_workitems) PERFCTR_COUNTER(THREADPOOL_WORKITEMS_PSEC, "Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_workitems) PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS, "IO Work Items Added", "", NumberOfItems64, threadpool_ioworkitems) PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS_PSEC, "IO Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_ioworkitems) PERFCTR_COUNTER(THREADPOOL_THREADS, "# of Threads", "", NumberOfItems32, threadpool_threads) PERFCTR_COUNTER(THREADPOOL_IOTHREADS, "# of IO Threads", "", NumberOfItems32, threadpool_iothreads) PERFCTR_CAT(NETWORK, "Network Interface", "", MultiInstance, NetworkInterface, NETWORK_BYTESRECSEC) PERFCTR_COUNTER(NETWORK_BYTESRECSEC, "Bytes Received/sec", "", RateOfCountsPerSecond64, unused) PERFCTR_COUNTER(NETWORK_BYTESSENTSEC, "Bytes Sent/sec", "", RateOfCountsPerSecond64, unused) PERFCTR_COUNTER(NETWORK_BYTESTOTALSEC, "Bytes Total/sec", "", RateOfCountsPerSecond64, unused)
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Net.WebSockets/tests/WebSocketDeflateOptionsTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Net.WebSockets.Tests { public class WebSocketDeflateOptionsTests { [Fact] public void ClientMaxWindowBits() { WebSocketDeflateOptions options = new(); Assert.Equal(15, options.ClientMaxWindowBits); Assert.Throws<ArgumentOutOfRangeException>(() => options.ClientMaxWindowBits = 8); Assert.Throws<ArgumentOutOfRangeException>(() => options.ClientMaxWindowBits = 16); options.ClientMaxWindowBits = 14; Assert.Equal(14, options.ClientMaxWindowBits); } [Fact] public void ServerMaxWindowBits() { WebSocketDeflateOptions options = new(); Assert.Equal(15, options.ServerMaxWindowBits); Assert.Throws<ArgumentOutOfRangeException>(() => options.ServerMaxWindowBits = 8); Assert.Throws<ArgumentOutOfRangeException>(() => options.ServerMaxWindowBits = 16); options.ServerMaxWindowBits = 14; Assert.Equal(14, options.ServerMaxWindowBits); } [Fact] public void ContextTakeover() { WebSocketDeflateOptions options = new(); Assert.True(options.ClientContextTakeover); Assert.True(options.ServerContextTakeover); options.ClientContextTakeover = false; Assert.False(options.ClientContextTakeover); options.ServerContextTakeover = false; Assert.False(options.ServerContextTakeover); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Net.WebSockets.Tests { public class WebSocketDeflateOptionsTests { [Fact] public void ClientMaxWindowBits() { WebSocketDeflateOptions options = new(); Assert.Equal(15, options.ClientMaxWindowBits); Assert.Throws<ArgumentOutOfRangeException>(() => options.ClientMaxWindowBits = 8); Assert.Throws<ArgumentOutOfRangeException>(() => options.ClientMaxWindowBits = 16); options.ClientMaxWindowBits = 14; Assert.Equal(14, options.ClientMaxWindowBits); } [Fact] public void ServerMaxWindowBits() { WebSocketDeflateOptions options = new(); Assert.Equal(15, options.ServerMaxWindowBits); Assert.Throws<ArgumentOutOfRangeException>(() => options.ServerMaxWindowBits = 8); Assert.Throws<ArgumentOutOfRangeException>(() => options.ServerMaxWindowBits = 16); options.ServerMaxWindowBits = 14; Assert.Equal(14, options.ServerMaxWindowBits); } [Fact] public void ContextTakeover() { WebSocketDeflateOptions options = new(); Assert.True(options.ClientContextTakeover); Assert.True(options.ServerContextTakeover); options.ClientContextTakeover = false; Assert.False(options.ClientContextTakeover); options.ServerContextTakeover = false; Assert.False(options.ServerContextTakeover); } } }
-1
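The tests in the row above pin down the valid window-bits range for permessage-deflate (9 through 15; 8 and 16 throw ArgumentOutOfRangeException) and the context-takeover defaults (both true). A hedged usage sketch of how such options are typically attached to a client handshake; DangerousDeflateOptions is the property .NET 6+ exposes for this, and the connect step is elided.

using System.Net.WebSockets;

class DeflateOptionsSketch
{
    static void Main()
    {
        var ws = new ClientWebSocket();
        // Window bits must be in [9, 15]; 8 and 16 throw, as the tests assert.
        ws.Options.DangerousDeflateOptions = new WebSocketDeflateOptions
        {
            ClientMaxWindowBits = 14,
            ServerMaxWindowBits = 14,
            ClientContextTakeover = false, // defaults are true
            ServerContextTakeover = true
        };
        // ws.ConnectAsync(...) would then negotiate permessage-deflate.
    }
}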
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/tests/JIT/Regression/JitBlue/DevDiv_491211/DevDiv_491211.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Reflection.MetadataLoadContext/tests/src/Tests/Event/EventTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using SampleMetadata; using Xunit; namespace System.Reflection.Tests { public static partial class EventTests { [Fact] public static void EventTest1() { Type t = typeof(DerivedFromEventHolder1<int>).Project(); Type rt = t; Type dt = t.BaseType; EventInfo e = t.GetEvent("MyEvent"); string es = e.ToString(); Assert.Equal(typeof(Action<>).Project().MakeGenericType(typeof(int).Project()), e.EventHandlerType); Assert.Equal(t.Module, e.Module); Assert.Equal(dt, e.DeclaringType); Assert.Equal(rt, e.ReflectedType); MethodInfo adder = e.AddMethod; Assert.Equal("add_MyEvent", adder.Name); Assert.Equal(dt, adder.DeclaringType); Assert.Equal(rt, adder.ReflectedType); MethodInfo remover = e.RemoveMethod; Assert.Equal("remove_MyEvent", remover.Name); Assert.Equal(dt, remover.DeclaringType); Assert.Equal(rt, remover.ReflectedType); Assert.Null(e.RaiseMethod); Assert.Equal(0, e.GetOtherMethods(nonPublic: true).Length); Assert.Equal(0, e.GetOtherMethods(nonPublic: false).Length); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using SampleMetadata; using Xunit; namespace System.Reflection.Tests { public static partial class EventTests { [Fact] public static void EventTest1() { Type t = typeof(DerivedFromEventHolder1<int>).Project(); Type rt = t; Type dt = t.BaseType; EventInfo e = t.GetEvent("MyEvent"); string es = e.ToString(); Assert.Equal(typeof(Action<>).Project().MakeGenericType(typeof(int).Project()), e.EventHandlerType); Assert.Equal(t.Module, e.Module); Assert.Equal(dt, e.DeclaringType); Assert.Equal(rt, e.ReflectedType); MethodInfo adder = e.AddMethod; Assert.Equal("add_MyEvent", adder.Name); Assert.Equal(dt, adder.DeclaringType); Assert.Equal(rt, adder.ReflectedType); MethodInfo remover = e.RemoveMethod; Assert.Equal("remove_MyEvent", remover.Name); Assert.Equal(dt, remover.DeclaringType); Assert.Equal(rt, remover.ReflectedType); Assert.Null(e.RaiseMethod); Assert.Equal(0, e.GetOtherMethods(nonPublic: true).Length); Assert.Equal(0, e.GetOtherMethods(nonPublic: false).Length); } } }
-1
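In the row above, Project() is MetadataLoadContext test infrastructure that maps runtime types into the inspection-only load context under test; the assertions themselves are about ordinary EventInfo shape. A plain-reflection sketch of the same assertions, with a hypothetical EventInfoSketch class standing in for the SampleMetadata types.

using System;

#pragma warning disable 0067 // the event is only inspected, never raised

class EventInfoSketch
{
    public event Action<int> MyEvent;

    static void Main()
    {
        var e = typeof(EventInfoSketch).GetEvent("MyEvent");
        // Field-like events get compiler-generated add_/remove_ accessors and
        // no raise method, mirroring what the test asserts.
        Console.WriteLine(e.EventHandlerType);    // System.Action`1[System.Int32]
        Console.WriteLine(e.AddMethod.Name);      // add_MyEvent
        Console.WriteLine(e.RemoveMethod.Name);   // remove_MyEvent
        Console.WriteLine(e.RaiseMethod == null); // True
    }
}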
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Data.Common/src/System.Data.Common.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <Nullable>enable</Nullable> <NoWarn>$(NoWarn);SYSLIB0038</NoWarn> </PropertyGroup> <ItemGroup> <Compile Include="System.Data.Common.TypeForwards.cs" /> <Compile Include="System\Data\DataReaderExtensions.cs" /> <Compile Include="System\HResults.cs" /> <Compile Include="System\Xml\TreeIterator.cs" /> <Compile Include="System\Xml\XmlBoundElement.cs" /> <Compile Include="System\Xml\XmlDataDocument.cs" /> <Compile Include="System\Xml\XmlDataImplementation.cs" /> <Compile Include="System\Xml\XPathNodePointer.cs" /> <Compile Include="System\Xml\BaseTreeIterator.cs" /> <Compile Include="System\Xml\DataDocumentXPathNavigator.cs" /> <Compile Include="System\Xml\DataPointer.cs" /> <Compile Include="System\Xml\DataSetMappper.cs" /> <Compile Include="System\Xml\IXmlDataVirtualNode.cs" /> <Compile Include="System\Xml\RegionIterator.cs" /> <Compile Include="System\Data\updatestatus.cs" /> <Compile Include="System\Data\XDRSchema.cs" /> <Compile Include="System\Data\XmlDataLoader.cs" /> <Compile Include="System\Data\XMLDiffLoader.cs" /> <Compile Include="System\Data\XmlKeywords.cs" /> <Compile Include="System\Data\XmlReadMode.cs" /> <Compile Include="System\Data\xmlsaver.cs" /> <Compile Include="System\Data\XMLSchema.cs" /> <Compile Include="System\Data\XmlToDatasetMap.cs" /> <Compile Include="System\Data\XmlWriteMode.cs" /> <Compile Include="System\Data\AcceptRejectRule.cs" /> <Compile Include="System\Data\AggregateType.cs" /> <Compile Include="System\Data\BaseCollection.cs" /> <Compile Include="System\Data\CatalogLocation.cs" /> <Compile Include="System\Data\ColumnTypeConverter.cs" /> <Compile Include="System\Data\CommandBehavior.cs" /> <Compile Include="System\Data\CommandType.cs" /> <Compile Include="System\Data\ConflictOptions.cs" /> <Compile Include="System\Data\ConnectionState.cs" /> <Compile Include="System\Data\Constraint.cs" /> <Compile Include="System\Data\ConstraintCollection.cs" /> <Compile Include="System\Data\ConstraintConverter.cs" /> <Compile Include="System\Data\ConstraintEnumerator.cs" /> <Compile Include="System\Data\DataColumn.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataColumnChangeEvent.cs" /> <Compile Include="System\Data\DataColumnChangeEventHandler.cs" /> <Compile Include="System\Data\DataColumnCollection.cs" /> <Compile Include="System\Data\DataColumnPropertyDescriptor.cs" /> <Compile Include="System\Data\DataError.cs" /> <Compile Include="System\Data\DataException.cs" /> <Compile Include="System\Data\DataKey.cs" /> <Compile Include="System\Data\DataRelation.cs" /> <Compile Include="System\Data\DataRelationCollection.cs" /> <Compile Include="System\Data\DataRelationPropertyDescriptor.cs" /> <Compile Include="System\Data\DataRow.cs" /> <Compile Include="System\Data\DataRowAction.cs" /> <Compile Include="System\Data\DataRowChangeEvent.cs" /> <Compile Include="System\Data\DataRowChangeEventHandler.cs" /> <Compile Include="System\Data\DataRowCollection.cs" /> <Compile Include="System\Data\DataRowComparer.cs" /> <Compile Include="System\Data\DataRowCreatedEventHandler.cs" /> <Compile Include="System\Data\DataRowExtensions.cs" /> <Compile Include="System\Data\DataRowState.cs" /> <Compile Include="System\Data\DataRowVersion.cs" /> <Compile Include="System\Data\DataRowView.cs" /> <Compile Include="System\Data\DataSerializationFormat.cs" /> <Compile Include="System\Data\DataSet.cs"> 
<SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataSetUtil.cs" /> <Compile Include="System\Data\DataSetDateTime.cs" /> <Compile Include="System\Data\DataSysAttribute.cs" /> <Compile Include="System\Data\DataTable.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataTableExtensions.cs" /> <Compile Include="System\Data\DataTableClearEvent.cs" /> <Compile Include="System\Data\DataTableClearEventHandler.cs" /> <Compile Include="System\Data\DataTableCollection.cs" /> <Compile Include="System\Data\DataTableNewRowEvent.cs" /> <Compile Include="System\Data\DataTableNewRowEventHandler.cs" /> <Compile Include="System\Data\DataTablePropertyDescriptor.cs" /> <Compile Include="System\Data\DataTableReader.cs" /> <Compile Include="System\Data\DataTableReaderListener.cs" /> <Compile Include="System\Data\DataTableTypeConverter.cs" /> <Compile Include="System\Data\DataView.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataViewListener.cs" /> <Compile Include="System\Data\DataViewManager.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataViewManagerListItemTypeDescriptor.cs" /> <Compile Include="System\Data\DataViewRowState.cs" /> <Compile Include="System\Data\DataViewSetting.cs" /> <Compile Include="System\Data\DataViewSettingCollection.cs" /> <Compile Include="System\Data\DBConcurrencyException.cs" /> <Compile Include="System\Data\DbType.cs" /> <Compile Include="System\Data\DefaultValueTypeConverter.cs" /> <Compile Include="System\Data\EnumerableRowCollection.cs" /> <Compile Include="System\Data\EnumerableRowCollectionExtensions.cs" /> <Compile Include="System\Data\FillErrorEventArgs.cs" /> <Compile Include="System\Data\FillErrorEventHandler.cs" /> <Compile Include="System\Data\ForeignKeyConstraint.cs" /> <Compile Include="System\Data\IColumnMapping.cs" /> <Compile Include="System\Data\IColumnMappingCollection.cs" /> <Compile Include="System\Data\IDataAdapter.cs" /> <Compile Include="System\Data\IDataParameter.cs" /> <Compile Include="System\Data\IDataParameterCollection.cs" /> <Compile Include="System\Data\IDataReader.cs" /> <Compile Include="System\Data\IDataRecord.cs" /> <Compile Include="System\Data\IDbCommand.cs" /> <Compile Include="System\Data\IDbConnection.cs" /> <Compile Include="System\Data\IDbDataAdapter.cs" /> <Compile Include="System\Data\IDbDataParameter.cs" /> <Compile Include="System\Data\IDbTransaction.cs" /> <Compile Include="System\Data\IsolationLevel.cs" /> <Compile Include="System\Data\ITableMapping.cs" /> <Compile Include="System\Data\ITableMappingCollection.cs" /> <Compile Include="System\Data\KeyRestrictionBehavior.cs" /> <Compile Include="System\Data\LinqDataView.cs" /> <Compile Include="System\Data\LoadOption.cs" /> <Compile Include="System\Data\LocalAppContextSwitches.cs" /> <Compile Include="$(CommonPath)System\LocalAppContextSwitches.Common.cs"> <Link>Common\System\LocalAppContextSwitches.Common.cs</Link> </Compile> <Compile Include="System\Data\MappingType.cs" /> <Compile Include="System\Data\MergeFailedEvent.cs" /> <Compile Include="System\Data\MergeFailedEventHandler.cs" /> <Compile Include="System\Data\Merger.cs" /> <Compile Include="System\Data\MissingMappingAction.cs" /> <Compile Include="System\Data\MissingSchemaAction.cs" /> <Compile Include="System\Data\OrderedEnumerableRowCollection.cs" /> <Compile Include="System\Data\ParameterDirection.cs" /> <Compile Include="System\Data\PrimaryKeyTypeConverter.cs" /> <Compile Include="System\Data\PropertyCollection.cs" /> 
<Compile Include="System\Data\Range.cs" /> <Compile Include="System\Data\RbTree.cs" /> <Compile Include="System\Data\RecordManager.cs" /> <Compile Include="System\Data\StatementCompletedEventArgs.cs" /> <Compile Include="System\Data\StatementCompletedEventHandler.cs" /> <Compile Include="System\Data\RelatedView.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\RelationshipConverter.cs" /> <Compile Include="System\Data\Rule.cs" /> <Compile Include="System\Data\SchemaSerializationMode.cs" /> <Compile Include="System\Data\SchemaType.cs" /> <Compile Include="System\Data\Select.cs" /> <Compile Include="System\Data\Selection.cs" /> <Compile Include="System\Data\SimpleType.cs" /> <Compile Include="System\Data\SortExpressionBuilder.cs" /> <Compile Include="System\Data\SqlDbType.cs" /> <Compile Include="System\Data\StateChangeEvent.cs" /> <Compile Include="System\Data\StateChangeEventHandler.cs" /> <Compile Include="System\Data\StatementType.cs" /> <Compile Include="System\Data\StrongTypingException.cs" /> <Compile Include="System\Data\TypedTableBase.cs" /> <Compile Include="System\Data\TypedTableBaseExtensions.cs" /> <Compile Include="System\Data\TypeLimiter.cs" /> <Compile Include="System\Data\UniqueConstraint.cs" /> <Compile Include="System\Data\UpdateRowSource.cs" /> <Compile Include="System\Data\Common\UInt64Storage.cs" /> <Compile Include="System\Data\Common\AdapterUtil.Common.cs" /> <Compile Include="$(CommonPath)System\Data\Common\AdapterUtil.cs" Link="System\Data\Common\AdapterUtil.cs" /> <Compile Include="System\Data\Common\BigIntegerStorage.cs" /> <Compile Include="System\Data\Common\BooleanStorage.cs" /> <Compile Include="System\Data\Common\ByteStorage.cs" /> <Compile Include="System\Data\Common\CharStorage.cs" /> <Compile Include="System\Data\Common\DataAdapter.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DataColumnMapping.cs" /> <Compile Include="System\Data\Common\DataColumnMappingCollection.cs" /> <Compile Include="System\Data\Common\DataCommonEventSource.cs" /> <Compile Include="System\Data\Common\DataRecordInternal.cs" /> <Compile Include="System\Data\Common\DataStorage.cs" /> <Compile Include="System\Data\Common\DataTableMapping.cs" /> <Compile Include="System\Data\Common\DataTableMappingCollection.cs" /> <Compile Include="System\Data\Common\DateTimeOffsetStorage.cs" /> <Compile Include="System\Data\Common\DateTimeStorage.cs" /> <Compile Include="System\Data\Common\DbBatch.cs" /> <Compile Include="System\Data\Common\DbBatchCommand.cs" /> <Compile Include="System\Data\Common\DbBatchCommandCollection.cs" /> <Compile Include="System\Data\Common\DbColumn.cs" /> <Compile Include="System\Data\Common\DbCommand.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DBCommandBuilder.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DbConnection.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DbConnectionOptions.cs" /> <Compile Include="$(CommonPath)System\Data\Common\DbConnectionOptions.Common.cs" Link="System\Data\Common\DbConnectionOptions.Common.cs" /> <Compile Include="$(CommonPath)System\Data\Common\DbConnectionPoolKey.cs" Link="System\Data\Common\DbConnectionPoolKey.cs" /> <Compile Include="System\Data\Common\DbConnectionStringBuilder.cs" /> <Compile Include="System\Data\Common\DbConnectionStringCommon.cs" /> <Compile Include="System\Data\Common\DbConnectionStringBuilderDescriptor.cs" /> <Compile 
Include="System\Data\Common\DbDataAdapter.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DbDataReader.cs" /> <Compile Include="System\Data\Common\DbDataReaderExtensions.cs" /> <Compile Include="System\Data\Common\DbDataRecord.cs" /> <Compile Include="System\Data\Common\DbDataSourceEnumerator.cs" /> <Compile Include="System\Data\Common\DbEnumerator.cs" /> <Compile Include="System\Data\Common\DbException.cs" /> <Compile Include="System\Data\Common\DbMetaDataCollectionNames.cs" /> <Compile Include="System\Data\Common\DbMetaDataColumnNames.cs" /> <Compile Include="System\Data\Common\DbParameter.cs" /> <Compile Include="System\Data\Common\DbParameterCollection.cs" /> <Compile Include="System\Data\Common\DbProviderFactory.cs" /> <Compile Include="System\Data\Common\DbProviderFactories.cs" /> <Compile Include="System\Data\Common\DbProviderSpecificTypePropertyAttribute.cs" /> <Compile Include="System\Data\Common\DBSchemaRow.cs" /> <Compile Include="System\Data\Common\DBSchemaTable.cs" /> <Compile Include="System\Data\Common\DbTransaction.cs" /> <Compile Include="System\Data\Common\DecimalStorage.cs" /> <Compile Include="System\Data\Common\DoubleStorage.cs" /> <Compile Include="System\Data\Common\FieldNameLookup.cs" /> <Compile Include="System\Data\Common\Groupbybehavior.cs" /> <Compile Include="System\Data\Common\IDbColumnSchemaGenerator.cs" /> <Compile Include="System\Data\Common\identifiercase.cs" /> <Compile Include="System\Data\Common\Int16Storage.cs" /> <Compile Include="System\Data\Common\Int32Storage.cs" /> <Compile Include="System\Data\Common\Int64Storage.cs" /> <Compile Include="$(CommonPath)System\Data\Common\MultipartIdentifier.cs" Link="System\Data\Common\MultipartIdentifier.cs" /> <Compile Include="$(CommonPath)System\Data\Common\NameValuePair.cs" Link="System\Data\Common\NameValuePair.cs" /> <Compile Include="System\Data\Common\ObjectStorage.cs" /> <Compile Include="System\Data\Common\RowUpdatedEventArgs.cs" /> <Compile Include="System\Data\Common\RowUpdatingEventArgs.cs" /> <Compile Include="System\Data\Common\SByteStorage.cs" /> <Compile Include="System\Data\Common\SchemaTableColumn.cs" /> <Compile Include="System\Data\Common\SchemaTableOptionalColumn.cs" /> <Compile Include="System\Data\Common\SingleStorage.cs" /> <Compile Include="System\Data\Common\SQLConvert.cs" /> <Compile Include="System\Data\Common\SqlUDTStorage.cs" /> <Compile Include="System\Data\Common\StringStorage.cs" /> <Compile Include="System\Data\Common\SupportedJoinOperators.cs" /> <Compile Include="System\Data\Common\TimeSpanStorage.cs" /> <Compile Include="System\Data\Common\UInt16Storage.cs" /> <Compile Include="System\Data\Common\UInt32Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLCharsStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLDateTimeStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLDecimalStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLDoubleStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLGuidStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLInt16Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLInt32Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLInt64Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLMoneyStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLSingleStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLStringStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLBinaryStorage.cs" /> 
<Compile Include="System\Data\Common\SQLTypes\SQlBooleanStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLBytesStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLByteStorage.cs" /> <Compile Include="System\Data\Filter\DataExpression.cs" /> <Compile Include="System\Data\Filter\ExpressionNode.cs" /> <Compile Include="System\Data\Filter\ExpressionParser.cs" /> <Compile Include="System\Data\Filter\FilterException.cs" /> <Compile Include="System\Data\Filter\FunctionNode.cs" /> <Compile Include="System\Data\Filter\IFilter.cs" /> <Compile Include="System\Data\Filter\LookupNode.cs" /> <Compile Include="System\Data\Filter\NameNode.cs" /> <Compile Include="System\Data\Filter\Operators.cs" /> <Compile Include="System\Data\Filter\UnaryNode.cs" /> <Compile Include="System\Data\Filter\ZeroOpNode.cs" /> <Compile Include="System\Data\Filter\AggregateNode.cs" /> <Compile Include="System\Data\Filter\BinaryNode.cs" /> <Compile Include="System\Data\Filter\ConstNode.cs" /> <Compile Include="System\Data\SQLTypes\SQLChars.cs" /> <Compile Include="System\Data\SQLTypes\SQLDateTime.cs" /> <Compile Include="System\Data\SQLTypes\SQLDecimal.cs" /> <Compile Include="System\Data\SQLTypes\SQLDouble.cs" /> <Compile Include="System\Data\SQLTypes\SQLGuid.cs" /> <Compile Include="System\Data\SQLTypes\SQLInt16.cs" /> <Compile Include="System\Data\SQLTypes\SQLInt32.cs" /> <Compile Include="System\Data\SQLTypes\SQLInt64.cs" /> <Compile Include="System\Data\SQLTypes\SQLMoney.cs" /> <Compile Include="$(CommonPath)System\Data\Common\SQLResource.cs" Link="System\Data\SQLTypes\SQLResource.cs" /> <Compile Include="System\Data\SQLTypes\SQLSingle.cs" /> <Compile Include="System\Data\SQLTypes\SQLString.cs" /> <Compile Include="System\Data\SQLTypes\SQLUtility.cs" /> <Compile Include="System\Data\SQLTypes\SqlXml.cs" /> <Compile Include="System\Data\SQLTypes\INullable.cs" /> <Compile Include="System\Data\SQLTypes\SQLBinary.cs" /> <Compile Include="System\Data\SQLTypes\SQLBoolean.cs" /> <Compile Include="System\Data\SQLTypes\SQLByte.cs" /> <Compile Include="System\Data\SQLTypes\SQLBytes.cs" /> <Compile Include="System\Data\ProviderBase\DataReaderContainer.cs" /> <Compile Include="System\Data\ProviderBase\SchemaMapping.cs" /> <Compile Include="$(CommonPath)System\Obsoletions.cs" Link="Common\System\Obsoletions.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="$(CoreLibProject)" /> <ProjectReference Include="..\..\System.Collections\src\System.Collections.csproj" /> <ProjectReference Include="..\..\System.Collections.NonGeneric\src\System.Collections.NonGeneric.csproj" /> <ProjectReference Include="..\..\System.ComponentModel.TypeConverter\src\System.ComponentModel.TypeConverter.csproj" /> <ProjectReference Include="..\..\System.Runtime\src\System.Runtime.csproj" /> <ProjectReference Include="..\..\System.Runtime.Extensions\src\System.Runtime.Extensions.csproj" /> <ProjectReference Include="..\..\System.Private.Uri\src\System.Private.Uri.csproj" /> <Reference Include="System.Collections.Concurrent" /> <Reference Include="System.ComponentModel" /> <Reference Include="System.ComponentModel.Primitives" /> <Reference Include="System.Drawing.Primitives" /> <Reference Include="System.Linq" /> <Reference Include="System.Linq.Expressions" /> <Reference Include="System.ObjectModel" /> <Reference Include="System.Runtime.Numerics" /> <Reference Include="System.Runtime.Serialization.Formatters" /> <Reference Include="System.Text.RegularExpressions" /> <Reference Include="System.Transactions.Local" /> <Reference 
Include="System.Xml.ReaderWriter" /> <Reference Include="System.Xml.XmlSerializer" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <Nullable>enable</Nullable> <NoWarn>$(NoWarn);SYSLIB0038</NoWarn> </PropertyGroup> <ItemGroup> <Compile Include="System.Data.Common.TypeForwards.cs" /> <Compile Include="System\Data\DataReaderExtensions.cs" /> <Compile Include="System\HResults.cs" /> <Compile Include="System\Xml\TreeIterator.cs" /> <Compile Include="System\Xml\XmlBoundElement.cs" /> <Compile Include="System\Xml\XmlDataDocument.cs" /> <Compile Include="System\Xml\XmlDataImplementation.cs" /> <Compile Include="System\Xml\XPathNodePointer.cs" /> <Compile Include="System\Xml\BaseTreeIterator.cs" /> <Compile Include="System\Xml\DataDocumentXPathNavigator.cs" /> <Compile Include="System\Xml\DataPointer.cs" /> <Compile Include="System\Xml\DataSetMappper.cs" /> <Compile Include="System\Xml\IXmlDataVirtualNode.cs" /> <Compile Include="System\Xml\RegionIterator.cs" /> <Compile Include="System\Data\updatestatus.cs" /> <Compile Include="System\Data\XDRSchema.cs" /> <Compile Include="System\Data\XmlDataLoader.cs" /> <Compile Include="System\Data\XMLDiffLoader.cs" /> <Compile Include="System\Data\XmlKeywords.cs" /> <Compile Include="System\Data\XmlReadMode.cs" /> <Compile Include="System\Data\xmlsaver.cs" /> <Compile Include="System\Data\XMLSchema.cs" /> <Compile Include="System\Data\XmlToDatasetMap.cs" /> <Compile Include="System\Data\XmlWriteMode.cs" /> <Compile Include="System\Data\AcceptRejectRule.cs" /> <Compile Include="System\Data\AggregateType.cs" /> <Compile Include="System\Data\BaseCollection.cs" /> <Compile Include="System\Data\CatalogLocation.cs" /> <Compile Include="System\Data\ColumnTypeConverter.cs" /> <Compile Include="System\Data\CommandBehavior.cs" /> <Compile Include="System\Data\CommandType.cs" /> <Compile Include="System\Data\ConflictOptions.cs" /> <Compile Include="System\Data\ConnectionState.cs" /> <Compile Include="System\Data\Constraint.cs" /> <Compile Include="System\Data\ConstraintCollection.cs" /> <Compile Include="System\Data\ConstraintConverter.cs" /> <Compile Include="System\Data\ConstraintEnumerator.cs" /> <Compile Include="System\Data\DataColumn.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataColumnChangeEvent.cs" /> <Compile Include="System\Data\DataColumnChangeEventHandler.cs" /> <Compile Include="System\Data\DataColumnCollection.cs" /> <Compile Include="System\Data\DataColumnPropertyDescriptor.cs" /> <Compile Include="System\Data\DataError.cs" /> <Compile Include="System\Data\DataException.cs" /> <Compile Include="System\Data\DataKey.cs" /> <Compile Include="System\Data\DataRelation.cs" /> <Compile Include="System\Data\DataRelationCollection.cs" /> <Compile Include="System\Data\DataRelationPropertyDescriptor.cs" /> <Compile Include="System\Data\DataRow.cs" /> <Compile Include="System\Data\DataRowAction.cs" /> <Compile Include="System\Data\DataRowChangeEvent.cs" /> <Compile Include="System\Data\DataRowChangeEventHandler.cs" /> <Compile Include="System\Data\DataRowCollection.cs" /> <Compile Include="System\Data\DataRowComparer.cs" /> <Compile Include="System\Data\DataRowCreatedEventHandler.cs" /> <Compile Include="System\Data\DataRowExtensions.cs" /> <Compile Include="System\Data\DataRowState.cs" /> <Compile Include="System\Data\DataRowVersion.cs" /> <Compile Include="System\Data\DataRowView.cs" /> <Compile Include="System\Data\DataSerializationFormat.cs" /> <Compile Include="System\Data\DataSet.cs"> 
<SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataSetUtil.cs" /> <Compile Include="System\Data\DataSetDateTime.cs" /> <Compile Include="System\Data\DataSysAttribute.cs" /> <Compile Include="System\Data\DataTable.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataTableExtensions.cs" /> <Compile Include="System\Data\DataTableClearEvent.cs" /> <Compile Include="System\Data\DataTableClearEventHandler.cs" /> <Compile Include="System\Data\DataTableCollection.cs" /> <Compile Include="System\Data\DataTableNewRowEvent.cs" /> <Compile Include="System\Data\DataTableNewRowEventHandler.cs" /> <Compile Include="System\Data\DataTablePropertyDescriptor.cs" /> <Compile Include="System\Data\DataTableReader.cs" /> <Compile Include="System\Data\DataTableReaderListener.cs" /> <Compile Include="System\Data\DataTableTypeConverter.cs" /> <Compile Include="System\Data\DataView.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataViewListener.cs" /> <Compile Include="System\Data\DataViewManager.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\DataViewManagerListItemTypeDescriptor.cs" /> <Compile Include="System\Data\DataViewRowState.cs" /> <Compile Include="System\Data\DataViewSetting.cs" /> <Compile Include="System\Data\DataViewSettingCollection.cs" /> <Compile Include="System\Data\DBConcurrencyException.cs" /> <Compile Include="System\Data\DbType.cs" /> <Compile Include="System\Data\DefaultValueTypeConverter.cs" /> <Compile Include="System\Data\EnumerableRowCollection.cs" /> <Compile Include="System\Data\EnumerableRowCollectionExtensions.cs" /> <Compile Include="System\Data\FillErrorEventArgs.cs" /> <Compile Include="System\Data\FillErrorEventHandler.cs" /> <Compile Include="System\Data\ForeignKeyConstraint.cs" /> <Compile Include="System\Data\IColumnMapping.cs" /> <Compile Include="System\Data\IColumnMappingCollection.cs" /> <Compile Include="System\Data\IDataAdapter.cs" /> <Compile Include="System\Data\IDataParameter.cs" /> <Compile Include="System\Data\IDataParameterCollection.cs" /> <Compile Include="System\Data\IDataReader.cs" /> <Compile Include="System\Data\IDataRecord.cs" /> <Compile Include="System\Data\IDbCommand.cs" /> <Compile Include="System\Data\IDbConnection.cs" /> <Compile Include="System\Data\IDbDataAdapter.cs" /> <Compile Include="System\Data\IDbDataParameter.cs" /> <Compile Include="System\Data\IDbTransaction.cs" /> <Compile Include="System\Data\IsolationLevel.cs" /> <Compile Include="System\Data\ITableMapping.cs" /> <Compile Include="System\Data\ITableMappingCollection.cs" /> <Compile Include="System\Data\KeyRestrictionBehavior.cs" /> <Compile Include="System\Data\LinqDataView.cs" /> <Compile Include="System\Data\LoadOption.cs" /> <Compile Include="System\Data\LocalAppContextSwitches.cs" /> <Compile Include="$(CommonPath)System\LocalAppContextSwitches.Common.cs"> <Link>Common\System\LocalAppContextSwitches.Common.cs</Link> </Compile> <Compile Include="System\Data\MappingType.cs" /> <Compile Include="System\Data\MergeFailedEvent.cs" /> <Compile Include="System\Data\MergeFailedEventHandler.cs" /> <Compile Include="System\Data\Merger.cs" /> <Compile Include="System\Data\MissingMappingAction.cs" /> <Compile Include="System\Data\MissingSchemaAction.cs" /> <Compile Include="System\Data\OrderedEnumerableRowCollection.cs" /> <Compile Include="System\Data\ParameterDirection.cs" /> <Compile Include="System\Data\PrimaryKeyTypeConverter.cs" /> <Compile Include="System\Data\PropertyCollection.cs" /> 
<Compile Include="System\Data\Range.cs" /> <Compile Include="System\Data\RbTree.cs" /> <Compile Include="System\Data\RecordManager.cs" /> <Compile Include="System\Data\StatementCompletedEventArgs.cs" /> <Compile Include="System\Data\StatementCompletedEventHandler.cs" /> <Compile Include="System\Data\RelatedView.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\RelationshipConverter.cs" /> <Compile Include="System\Data\Rule.cs" /> <Compile Include="System\Data\SchemaSerializationMode.cs" /> <Compile Include="System\Data\SchemaType.cs" /> <Compile Include="System\Data\Select.cs" /> <Compile Include="System\Data\Selection.cs" /> <Compile Include="System\Data\SimpleType.cs" /> <Compile Include="System\Data\SortExpressionBuilder.cs" /> <Compile Include="System\Data\SqlDbType.cs" /> <Compile Include="System\Data\StateChangeEvent.cs" /> <Compile Include="System\Data\StateChangeEventHandler.cs" /> <Compile Include="System\Data\StatementType.cs" /> <Compile Include="System\Data\StrongTypingException.cs" /> <Compile Include="System\Data\TypedTableBase.cs" /> <Compile Include="System\Data\TypedTableBaseExtensions.cs" /> <Compile Include="System\Data\TypeLimiter.cs" /> <Compile Include="System\Data\UniqueConstraint.cs" /> <Compile Include="System\Data\UpdateRowSource.cs" /> <Compile Include="System\Data\Common\UInt64Storage.cs" /> <Compile Include="System\Data\Common\AdapterUtil.Common.cs" /> <Compile Include="$(CommonPath)System\Data\Common\AdapterUtil.cs" Link="System\Data\Common\AdapterUtil.cs" /> <Compile Include="System\Data\Common\BigIntegerStorage.cs" /> <Compile Include="System\Data\Common\BooleanStorage.cs" /> <Compile Include="System\Data\Common\ByteStorage.cs" /> <Compile Include="System\Data\Common\CharStorage.cs" /> <Compile Include="System\Data\Common\DataAdapter.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DataColumnMapping.cs" /> <Compile Include="System\Data\Common\DataColumnMappingCollection.cs" /> <Compile Include="System\Data\Common\DataCommonEventSource.cs" /> <Compile Include="System\Data\Common\DataRecordInternal.cs" /> <Compile Include="System\Data\Common\DataStorage.cs" /> <Compile Include="System\Data\Common\DataTableMapping.cs" /> <Compile Include="System\Data\Common\DataTableMappingCollection.cs" /> <Compile Include="System\Data\Common\DateTimeOffsetStorage.cs" /> <Compile Include="System\Data\Common\DateTimeStorage.cs" /> <Compile Include="System\Data\Common\DbBatch.cs" /> <Compile Include="System\Data\Common\DbBatchCommand.cs" /> <Compile Include="System\Data\Common\DbBatchCommandCollection.cs" /> <Compile Include="System\Data\Common\DbColumn.cs" /> <Compile Include="System\Data\Common\DbCommand.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DBCommandBuilder.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DbConnection.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DbConnectionOptions.cs" /> <Compile Include="$(CommonPath)System\Data\Common\DbConnectionOptions.Common.cs" Link="System\Data\Common\DbConnectionOptions.Common.cs" /> <Compile Include="$(CommonPath)System\Data\Common\DbConnectionPoolKey.cs" Link="System\Data\Common\DbConnectionPoolKey.cs" /> <Compile Include="System\Data\Common\DbConnectionStringBuilder.cs" /> <Compile Include="System\Data\Common\DbConnectionStringCommon.cs" /> <Compile Include="System\Data\Common\DbConnectionStringBuilderDescriptor.cs" /> <Compile 
Include="System\Data\Common\DbDataAdapter.cs"> <SubType>Component</SubType> </Compile> <Compile Include="System\Data\Common\DbDataReader.cs" /> <Compile Include="System\Data\Common\DbDataReaderExtensions.cs" /> <Compile Include="System\Data\Common\DbDataRecord.cs" /> <Compile Include="System\Data\Common\DbDataSourceEnumerator.cs" /> <Compile Include="System\Data\Common\DbEnumerator.cs" /> <Compile Include="System\Data\Common\DbException.cs" /> <Compile Include="System\Data\Common\DbMetaDataCollectionNames.cs" /> <Compile Include="System\Data\Common\DbMetaDataColumnNames.cs" /> <Compile Include="System\Data\Common\DbParameter.cs" /> <Compile Include="System\Data\Common\DbParameterCollection.cs" /> <Compile Include="System\Data\Common\DbProviderFactory.cs" /> <Compile Include="System\Data\Common\DbProviderFactories.cs" /> <Compile Include="System\Data\Common\DbProviderSpecificTypePropertyAttribute.cs" /> <Compile Include="System\Data\Common\DBSchemaRow.cs" /> <Compile Include="System\Data\Common\DBSchemaTable.cs" /> <Compile Include="System\Data\Common\DbTransaction.cs" /> <Compile Include="System\Data\Common\DecimalStorage.cs" /> <Compile Include="System\Data\Common\DoubleStorage.cs" /> <Compile Include="System\Data\Common\FieldNameLookup.cs" /> <Compile Include="System\Data\Common\Groupbybehavior.cs" /> <Compile Include="System\Data\Common\IDbColumnSchemaGenerator.cs" /> <Compile Include="System\Data\Common\identifiercase.cs" /> <Compile Include="System\Data\Common\Int16Storage.cs" /> <Compile Include="System\Data\Common\Int32Storage.cs" /> <Compile Include="System\Data\Common\Int64Storage.cs" /> <Compile Include="$(CommonPath)System\Data\Common\MultipartIdentifier.cs" Link="System\Data\Common\MultipartIdentifier.cs" /> <Compile Include="$(CommonPath)System\Data\Common\NameValuePair.cs" Link="System\Data\Common\NameValuePair.cs" /> <Compile Include="System\Data\Common\ObjectStorage.cs" /> <Compile Include="System\Data\Common\RowUpdatedEventArgs.cs" /> <Compile Include="System\Data\Common\RowUpdatingEventArgs.cs" /> <Compile Include="System\Data\Common\SByteStorage.cs" /> <Compile Include="System\Data\Common\SchemaTableColumn.cs" /> <Compile Include="System\Data\Common\SchemaTableOptionalColumn.cs" /> <Compile Include="System\Data\Common\SingleStorage.cs" /> <Compile Include="System\Data\Common\SQLConvert.cs" /> <Compile Include="System\Data\Common\SqlUDTStorage.cs" /> <Compile Include="System\Data\Common\StringStorage.cs" /> <Compile Include="System\Data\Common\SupportedJoinOperators.cs" /> <Compile Include="System\Data\Common\TimeSpanStorage.cs" /> <Compile Include="System\Data\Common\UInt16Storage.cs" /> <Compile Include="System\Data\Common\UInt32Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLCharsStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLDateTimeStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLDecimalStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLDoubleStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLGuidStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLInt16Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLInt32Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLInt64Storage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLMoneyStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLSingleStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLStringStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLBinaryStorage.cs" /> 
<Compile Include="System\Data\Common\SQLTypes\SQlBooleanStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLBytesStorage.cs" /> <Compile Include="System\Data\Common\SQLTypes\SQLByteStorage.cs" /> <Compile Include="System\Data\Filter\DataExpression.cs" /> <Compile Include="System\Data\Filter\ExpressionNode.cs" /> <Compile Include="System\Data\Filter\ExpressionParser.cs" /> <Compile Include="System\Data\Filter\FilterException.cs" /> <Compile Include="System\Data\Filter\FunctionNode.cs" /> <Compile Include="System\Data\Filter\IFilter.cs" /> <Compile Include="System\Data\Filter\LookupNode.cs" /> <Compile Include="System\Data\Filter\NameNode.cs" /> <Compile Include="System\Data\Filter\Operators.cs" /> <Compile Include="System\Data\Filter\UnaryNode.cs" /> <Compile Include="System\Data\Filter\ZeroOpNode.cs" /> <Compile Include="System\Data\Filter\AggregateNode.cs" /> <Compile Include="System\Data\Filter\BinaryNode.cs" /> <Compile Include="System\Data\Filter\ConstNode.cs" /> <Compile Include="System\Data\SQLTypes\SQLChars.cs" /> <Compile Include="System\Data\SQLTypes\SQLDateTime.cs" /> <Compile Include="System\Data\SQLTypes\SQLDecimal.cs" /> <Compile Include="System\Data\SQLTypes\SQLDouble.cs" /> <Compile Include="System\Data\SQLTypes\SQLGuid.cs" /> <Compile Include="System\Data\SQLTypes\SQLInt16.cs" /> <Compile Include="System\Data\SQLTypes\SQLInt32.cs" /> <Compile Include="System\Data\SQLTypes\SQLInt64.cs" /> <Compile Include="System\Data\SQLTypes\SQLMoney.cs" /> <Compile Include="$(CommonPath)System\Data\Common\SQLResource.cs" Link="System\Data\SQLTypes\SQLResource.cs" /> <Compile Include="System\Data\SQLTypes\SQLSingle.cs" /> <Compile Include="System\Data\SQLTypes\SQLString.cs" /> <Compile Include="System\Data\SQLTypes\SQLUtility.cs" /> <Compile Include="System\Data\SQLTypes\SqlXml.cs" /> <Compile Include="System\Data\SQLTypes\INullable.cs" /> <Compile Include="System\Data\SQLTypes\SQLBinary.cs" /> <Compile Include="System\Data\SQLTypes\SQLBoolean.cs" /> <Compile Include="System\Data\SQLTypes\SQLByte.cs" /> <Compile Include="System\Data\SQLTypes\SQLBytes.cs" /> <Compile Include="System\Data\ProviderBase\DataReaderContainer.cs" /> <Compile Include="System\Data\ProviderBase\SchemaMapping.cs" /> <Compile Include="$(CommonPath)System\Obsoletions.cs" Link="Common\System\Obsoletions.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="$(CoreLibProject)" /> <ProjectReference Include="..\..\System.Collections\src\System.Collections.csproj" /> <ProjectReference Include="..\..\System.Collections.NonGeneric\src\System.Collections.NonGeneric.csproj" /> <ProjectReference Include="..\..\System.ComponentModel.TypeConverter\src\System.ComponentModel.TypeConverter.csproj" /> <ProjectReference Include="..\..\System.Runtime\src\System.Runtime.csproj" /> <ProjectReference Include="..\..\System.Runtime.Extensions\src\System.Runtime.Extensions.csproj" /> <ProjectReference Include="..\..\System.Private.Uri\src\System.Private.Uri.csproj" /> <Reference Include="System.Collections.Concurrent" /> <Reference Include="System.ComponentModel" /> <Reference Include="System.ComponentModel.Primitives" /> <Reference Include="System.Drawing.Primitives" /> <Reference Include="System.Linq" /> <Reference Include="System.Linq.Expressions" /> <Reference Include="System.ObjectModel" /> <Reference Include="System.Runtime.Numerics" /> <Reference Include="System.Runtime.Serialization.Formatters" /> <Reference Include="System.Text.RegularExpressions" /> <Reference Include="System.Transactions.Local" /> <Reference 
Include="System.Xml.ReaderWriter" /> <Reference Include="System.Xml.XmlSerializer" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases with split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases with split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
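As a concrete illustration of the parameter shapes the description mentions, here is a hypothetical C# sketch; the type and method names are invented for illustration and are not taken from the PR. On the ARM32 ABI the struct argument below is split, with its first word passed in r3 and its second word on the stack, while the trailing long is a stack argument requiring 8-byte alignment. These are the kinds of parameters whose early stack offsets the fix corrects, and those offsets feed the interference check the JIT performs before placing outgoing arguments for a fast tail call.

using System.Runtime.CompilerServices;

// Illustrative sketch only; these names are not from the PR.
public struct Pair
{
    public int First;
    public int Second;
}

public static class FastTailCallShape
{
    // On ARM32: a -> r0, b -> r1, c -> r2, split -> r3 + one stack slot
    // (a "split" parameter), aligned -> an 8-byte-aligned stack argument.
    [MethodImpl(MethodImplOptions.NoInlining)]
    public static int Callee(int a, int b, int c, Pair split, long aligned)
        => a + b + c + split.First + split.Second + (int)aligned;

    // A fast-tail-call candidate: same argument shape as Callee, so the
    // JIT must verify that writing the outgoing arguments cannot clobber
    // the incoming split and stack parameters.
    [MethodImpl(MethodImplOptions.NoInlining)]
    public static int Caller(int a, int b, int c, Pair split, long aligned)
        => Callee(a + 1, b, c, split, aligned);

    public static void Main()
        => System.Console.WriteLine(Caller(1, 2, 3, new Pair { First = 4, Second = 5 }, 6L));
}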
./src/libraries/System.CodeDom/tests/System/CodeDom/Compiler/CodeGeneratorTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel; using System.Diagnostics; using System.Globalization; using System.IO; using System.Reflection; using System.Text; using Xunit; namespace System.CodeDom.Compiler.Tests { public class CodeGeneratorTests : CodeGenerator { [Fact] public void Ctor_Default() { CodeGeneratorTests generator = this; Assert.Null(generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Equal("<% unknown %>", generator.CurrentTypeName); Assert.Throws<NullReferenceException>(() => generator.Indent); Assert.False(generator.IsCurrentClass); Assert.False(generator.IsCurrentDelegate); Assert.False(generator.IsCurrentEnum); Assert.False(generator.IsCurrentInterface); Assert.False(generator.IsCurrentStruct); Assert.Null(generator.Options); Assert.Null(generator.Output); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("st", "st")] public void ContinueOnNewLine_InvokeWithOutput_Appends(string st, string expected) { CodeGeneratorTests generator = this; generator.PerformActionWithOutput(writer => { generator.ContinueOnNewLine(st); Assert.Equal(expected + Environment.NewLine, writer.ToString()); }); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("st")] public void ContinueOnNewLine_InvokeWithoutOutput_ThrowsNullReferenceException(string st) { CodeGeneratorTests generator = this; Assert.Throws<NullReferenceException>(() => generator.ContinueOnNewLine(st)); } [Theory] [InlineData(-1, 0)] [InlineData(0, 0)] [InlineData(3, 3)] public void Indent_SetWithOutput_GetReturnsExpected(int value, int expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.Indent = value; Assert.Equal(expected, generator.Indent); }); } [Fact] public void Indent_SetWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; Assert.Throws<NullReferenceException>(() => generator.Indent = 1); } public static IEnumerable<object[]> GenerateBinaryOperatorExpression_TestData() { yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)), "(1 + 2)" }; yield return new object[] { new CodeBinaryOperatorExpression(new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Multiply, new CodePrimitiveExpression(2)), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(3)), $"((1 * 2) {Environment.NewLine} + 3)" }; yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Multiply, new CodeBinaryOperatorExpression(new CodePrimitiveExpression(2), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(3))), $"(1 {Environment.NewLine} * (2 + 3))" }; } [Theory] [MemberData(nameof(GenerateBinaryOperatorExpression_TestData))] public void GenerateBinaryOperatorExpression_Invoke_Success(CodeBinaryOperatorExpression e, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateBinaryOperatorExpression(e); 
Assert.Equal(expected, writer.ToString()); // Call again to make sure indent is reset. Assert.Equal(expected, writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateBinaryOperatorExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateBinaryOperatorExpression(null)); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateBinaryOperatorExpression_NullLeftE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeBinaryOperatorExpression(null, CodeBinaryOperatorType.Add, new CodePrimitiveExpression(1)); generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateBinaryOperatorExpression(null)); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateBinaryOperatorExpression_NullRightE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, null); generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateBinaryOperatorExpression(null)); }); } [Fact] public void GenerateBinaryOperatorExpression_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeBinaryOperatorExpression(); generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateBinaryOperatorExpression(e)); } public static IEnumerable<object[]> GenerateCodeFromMember_TestData() { yield return new object[] { new CodeTypeMember(), null, Environment.NewLine }; yield return new object[] { new CodeTypeMember(), new CodeGeneratorOptions(), Environment.NewLine }; yield return new object[] { new CodeTypeMember(), new CodeGeneratorOptions { BlankLinesBetweenMembers = false}, string.Empty }; } [Theory] [MemberData(nameof(GenerateCodeFromMember_TestData))] public void GenerateCodeFromMember_Invoke_Success(CodeTypeMember member, CodeGeneratorOptions options, string expected) { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); var writer = new StringWriter(); generator.GenerateCodeFromMember(member, writer, options); Assert.Equal(expected, writer.ToString()); Assert.Null(generator.Output); Assert.Null(generator.Options); Assert.Null(generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Equal("<% unknown %>", generator.CurrentTypeName); } [Fact] public void GenerateCodeFromMember_InvokeWithCommentsDirectivesAndLinePragma_Success() { CodeGeneratorTests generator = this; var member = new CodeTypeMember { LinePragma = new CodeLinePragma() }; member.Comments.Add(new CodeCommentStatement("Comment")); member.Comments.Add(new 
CodeCommentStatement("Comment")); member.StartDirectives.Add(new CodeDirective()); member.StartDirectives.Add(new CodeChecksumPragma()); member.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); member.EndDirectives.Add(new CodeDirective()); member.EndDirectives.Add(new CodeChecksumPragma()); member.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); var writer = new StringWriter(); int generateCommentStatementsCallCount = 0; int generateCommentCallCount = 0; int generateLinePragmaStartCallCount = 0; int generateDirectivesCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(member.Comments, actualE); writer.Write("Comments "); Assert.Equal(0, generateLinePragmaStartCallCount); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); generateCommentStatementsCallCount++; }; generator.GenerateCommentAction = (actualE) => { Assert.Same(member.Comments[generateCommentCallCount].Comment, actualE); writer.Write("Comment "); generateCommentCallCount++; }; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(member.LinePragma, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(member.LinePragma, actualE); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? member.StartDirectives : member.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? 
"StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal($"{Environment.NewLine}StartDirectives Comment Comment Comments LinePragmaStart LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateCommentStatementsCallCount); Assert.Equal(2, generateCommentCallCount); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); } [Fact] public void GenerateCodeFromMember_CodeConstructor_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeConstructor(); int callCount = 0; generator.GenerateConstructorAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeEntryPointMethod_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeEntryPointMethod(); int callCount = 0; generator.GenerateEntryPointMethodAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberEvent_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberEvent(); int callCount = 0; generator.GenerateEventAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberField_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberField(); int callCount = 0; generator.GenerateFieldAction = (actualE) => { Assert.Same(member, actualE); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberMethod_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberMethod(); int callCount = 0; generator.GenerateMethodAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberProperty_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberProperty(); int callCount = 0; generator.GeneratePropertyAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeSnippetTypeMember_CallsCorrectMethod() { CodeGeneratorTests generator = this; 
var writer = new StringWriter(); var member = new CodeSnippetTypeMember(); int callCount = 0; generator.GenerateSnippetMemberAction = (actualE) => { Assert.Same(member, actualE); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine + Environment.NewLine, writer.ToString()); } [Fact] public void GenerateCodeFromMember_CodeTypeConstructor_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeTypeConstructor(); int callCount = 0; generator.GenerateTypeConstructorAction = (actualE) => { Assert.Same(member, actualE); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_InvokeWithOutput_ThrowsInvalidOperationException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { Assert.Throws<InvalidOperationException>(() => generator.GenerateCodeFromMember(new CodeTypeMember(), new StringWriter(), new CodeGeneratorOptions())); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCodeFromMember_NullMember_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("member", () => generator.GenerateCodeFromMember(null, new StringWriter(), new CodeGeneratorOptions())); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCodeFromMember_NullWriter_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("writer", () => generator.GenerateCodeFromMember(new CodeTypeMember(), null, new CodeGeneratorOptions())); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("text")] public void GenerateCommentStatement_Invoke_CallsCorrectMethod(string text) { CodeGeneratorTests generator = this; var e = new CodeCommentStatement(text); int callCount = 0; generator.GenerateCommentAction = (actualComment) => { Assert.Same(e.Comment, actualComment); callCount++; }; generator.GenerateCommentStatement(e); Assert.Equal(1, callCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCommentStatement_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCommentStatement(null)); } [Fact] public void GenerateCommentStatement_NullEComment_ThrowsArgumentException() { CodeGeneratorTests generator = this; var e = new CodeCommentStatement(); Assert.Throws<ArgumentException>("e", () => generator.GenerateCommentStatement(e)); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("text")] public void GenerateCommentStatements_InvokeNonEmpty_CallsCorrectMethod(string text) { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeCommentStatementCollection(new CodeCommentStatement[] { new CodeCommentStatement(text), new CodeCommentStatement("otherText") }); int callCount = 0; generator.GenerateCommentAction = (actualComment) => { Assert.Same(e[callCount].Comment, actualComment); callCount++; }; generator.GenerateCommentStatements(e); Assert.Equal(2, callCount); } [Fact] public void GenerateCommentStatements_InvokeEmptyE_Nop() { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction 
= (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatements(new CodeCommentStatementCollection()); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCommentStatements_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCommentStatements(null)); } [Fact] public void GenerateCommentStatements_NullValueInE_ThrowsArgumentException() { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeCommentStatementCollection(new CodeCommentStatement[] { new CodeCommentStatement() }); Assert.Throws<ArgumentException>("e", () => generator.GenerateCommentStatements(e)); } public static IEnumerable<object[]> GenerateCompileUnit_TestData() { yield return new object[] { new CodeCompileUnit() }; } [Theory] [MemberData(nameof(GenerateCompileUnit_TestData))] public void GenerateCompileUnit_InvokeWithOutput_Success(CodeCompileUnit e) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); int generateCompileUnitStartCallCount = 0; int generateCompileUnitEndCallCount = 0; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); Assert.Equal(0, generateCompileUnitEndCallCount); generateCompileUnitStartCallCount++; }; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); generateCompileUnitEndCallCount++; }; generator.GenerateCompileUnit(e); Assert.Equal(1, generateCompileUnitStartCallCount); Assert.Equal(1, generateCompileUnitEndCallCount); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateCompileUnit_InvokeWithDirectives_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeSnippetCompileUnit("value") { LinePragma = new CodeLinePragma() }; e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); int generateCompileUnitStartCallCount = 0; int generateCompileUnitEndCallCount = 0; int generateDirectivesCallCount = 0; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); Assert.Equal(0, generateCompileUnitEndCallCount); Assert.Equal(1, generateDirectivesCallCount); generateCompileUnitStartCallCount++; }; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); Assert.Equal(2, generateDirectivesCallCount); generateCompileUnitEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { Assert.Same(generateDirectivesCallCount == 0 ? e.StartDirectives : e.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? 
"StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateCompileUnit(e); Assert.Equal(1, generateCompileUnitStartCallCount); Assert.Equal(1, generateCompileUnitEndCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal("StartDirectives EndDirectives", writer.ToString()); }); } [Theory] [MemberData(nameof(GenerateCompileUnit_TestData))] public void GenerateCompileUnit_InvokeWithoutOutput_Success(CodeCompileUnit e) { CodeGeneratorTests generator = this; generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); int generateCompileUnitStartCallCount = 0; int generateCompileUnitEndCallCount = 0; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => { Assert.Same(e, actualE); Assert.Equal(0, generateCompileUnitEndCallCount); generateCompileUnitStartCallCount++; }; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => { Assert.Same(e, actualE); generateCompileUnitEndCallCount++; }; generator.GenerateCompileUnit(e); Assert.Equal(1, generateCompileUnitStartCallCount); Assert.Equal(1, generateCompileUnitEndCallCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCompileUnit_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCompileUnit(null)); } [Fact] public void GenerateCompileUnit_NullOutputWithNamespace_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); e.Namespaces.Add(new CodeNamespace("name")); generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateCompileUnit(e)); } [Fact] public void GenerateCompileUnitEnd_InvokeWithEndDirectives_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => baseMethod(actualE); int generateDirectivesCallCount = 0; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(e.EndDirectives, actualDirectives); generateDirectivesCallCount++; }; generator.GenerateCompileUnitEnd(e); Assert.Equal(1, generateDirectivesCallCount); } [Fact] public void GenerateCompileUnitEnd_InvokeWithoutEndDirectives_Nop() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitEnd(e); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCompileUnitEnd_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCompileUnitEnd(null)); } [Fact] public void GenerateCompileUnitStart_InvokeWithStartDirectives_CallsCorrectMethod() { CodeGeneratorTests generator = this; 
var e = new CodeCompileUnit(); e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); int generateDirectivesCallCount = 0; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(e.StartDirectives, actualDirectives); generateDirectivesCallCount++; }; generator.GenerateCompileUnitStart(e); Assert.Equal(1, generateDirectivesCallCount); } [Fact] public void GenerateCompileUnitStart_InvokeWithoutStartDirectives_Nop() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitStart(e); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCompileUnitStart_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCompileUnitStart(null)); } [Fact] public void GenerateDecimalValue_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDecimalValueAction = (actualS, baseMethod) => baseMethod(actualS); generator.GenerateDecimalValue(decimal.MaxValue); Assert.Equal("79228162514264337593543950335", writer.ToString()); }); } [Fact] public void GenerateDecimalValue_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; generator.GenerateDecimalValueAction = (actualS, baseMethod) => baseMethod(actualS); Assert.Throws<NullReferenceException>(() => generator.GenerateDecimalValue(1)); } public static IEnumerable<object[]> GenerateDefaultValueExpression_TestData() { yield return new object[] { null }; yield return new object[] { new CodeDefaultValueExpression() }; } [Theory] [MemberData(nameof(GenerateDefaultValueExpression_TestData))] public void GenerateDefaultValueExpression_InvokeWithOutput_Nop(CodeDefaultValueExpression e) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDefaultValueExpressionAction = (actualE, baseMethod) => baseMethod(e); generator.GenerateDefaultValueExpression(e); }); } [Theory] [MemberData(nameof(GenerateDefaultValueExpression_TestData))] public void GenerateDefaultValueExpression_InvokeWithoutOutput_Nop(CodeDefaultValueExpression e) { CodeGeneratorTests generator = this; generator.GenerateDefaultValueExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateDefaultValueExpression(e); } public static IEnumerable<object[]> GenerateDirectionExpression_TestData() { yield return new object[] { FieldDirection.In, "1" }; yield return new object[] { FieldDirection.Out, "out 1" }; yield return new object[] { FieldDirection.Ref, "ref 1" }; yield return new object[] { FieldDirection.In - 1, "1" }; yield return new object[] { FieldDirection.Ref + 1, "1" }; } [Theory] [MemberData(nameof(GenerateDirectionExpression_TestData))] public void GenerateDirectionExpression_Invoke_Success(FieldDirection direction, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeDirectionExpression(direction, new CodePrimitiveExpression(1)); 
generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputDirectionCallCount = 0; generator.OutputDirectionAction = (actualDirection, baseMethod) => { baseMethod(actualDirection); Assert.Equal(e.Direction, actualDirection); outputDirectionCallCount++; }; generator.GenerateDirectionExpression(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(1, outputDirectionCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateDirectionExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateDirectionExpression(null)); } [Fact] public void GenerateDirectionExpression_NullEExpression_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; var e = new CodeDirectionExpression(); generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateDirectionExpression(e)); } [Theory] [InlineData(FieldDirection.Out)] [InlineData(FieldDirection.Ref)] public void GenerateDirectionExpression_InvokeNonInWithoutWriter_ThrowsNullReferenceException(FieldDirection direction) { CodeGeneratorTests generator = this; var e = new CodeDirectionExpression(direction, new CodePrimitiveExpression(1)); generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateDirectionExpression(e)); } public static IEnumerable<object[]> GenerateDirectives_TestData() { yield return new object[] { null }; yield return new object[] { new CodeDirectiveCollection() }; yield return new object[] { new CodeDirectiveCollection(new CodeDirective[] { new CodeDirective() }) }; } [Theory] [MemberData(nameof(GenerateDirectives_TestData))] public void GenerateDirectives_InvokeWithOutput_Nop(CodeDirectiveCollection directives) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => baseMethod(actualDirectives); generator.GenerateDirectives(directives); }); } [Theory] [MemberData(nameof(GenerateDirectives_TestData))] public void GenerateDirectives_InvokeWithoutOutput_Nop(CodeDirectiveCollection directives) { CodeGeneratorTests generator = this; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => baseMethod(actualDirectives); generator.GenerateDirectives(directives); } [Fact] public void GenerateDoubleValue_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDoubleValueAction = (actualS, baseMethod) => baseMethod(actualS); generator.GenerateDoubleValue(double.MaxValue); Assert.Equal("1.7976931348623157E+308", writer.ToString()); }); } [Fact] 
public void GenerateDoubleValue_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; generator.GenerateDoubleValueAction = (actualS, baseMethod) => baseMethod(actualS); Assert.Throws<NullReferenceException>(() => generator.GenerateDoubleValue(1)); } [Fact] public void GenerateExpression_CodeArgumentReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeArgumentReferenceExpression(); int callCount = 0; generator.GenerateArgumentReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeArrayCreateExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeArrayCreateExpression(); int callCount = 0; generator.GenerateArrayCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeArrayIndexerExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeArrayIndexerExpression(); int callCount = 0; generator.GenerateArrayIndexerExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeBaseReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeBaseReferenceExpression(); int callCount = 0; generator.GenerateBaseReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeCastExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeCastExpression(); int callCount = 0; generator.GenerateCastExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDefaultValueExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDefaultValueExpression(); int callCount = 0; generator.GenerateDefaultValueExpressionAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDelegateCreateExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDelegateCreateExpression(); int callCount = 0; generator.GenerateDelegateCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDelegateInvokeExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDelegateInvokeExpression(); int callCount = 0; generator.GenerateDelegateInvokeExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDirectionExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDirectionExpression(); int callCount = 0; generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void 
GenerateExpression_CodeEventReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeEventReferenceExpression(); int callCount = 0; generator.GenerateEventReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeFieldReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeFieldReferenceExpression(); int callCount = 0; generator.GenerateFieldReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeIndexerExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeIndexerExpression(); int callCount = 0; generator.GenerateIndexerExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeMethodInvokeExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeMethodInvokeExpression(); int callCount = 0; generator.GenerateMethodInvokeExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeMethodReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeMethodReferenceExpression(); int callCount = 0; generator.GenerateMethodReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeObjectCreateExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeObjectCreateExpression(); int callCount = 0; generator.GenerateObjectCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeParameterDeclarationExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeParameterDeclarationExpression(); int callCount = 0; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodePrimitiveExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression(); int callCount = 0; generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodePropertyReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodePropertyReferenceExpression(); int callCount = 0; generator.GeneratePropertyReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodePropertySetValueReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodePropertySetValueReferenceExpression(); int callCount = 0; generator.GeneratePropertySetValueReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; 
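// Like the other dispatch tests, the callback counter proves GenerateExpression
// routed this expression type to its handler exactly once.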
};
generator.GenerateExpression(e);
Assert.Equal(1, callCount);
}

[Fact]
public void GenerateExpression_CodeSnippetExpression_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeSnippetExpression();
    int callCount = 0;
    generator.GenerateSnippetExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateExpression(e);
    Assert.Equal(1, callCount);
}

[Fact]
public void GenerateExpression_CodeThisReferenceExpression_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeThisReferenceExpression();
    int callCount = 0;
    generator.GenerateThisReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateExpression(e);
    Assert.Equal(1, callCount);
}

[Fact]
public void GenerateExpression_CodeTypeOfExpression_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeTypeOfExpression();
    int callCount = 0;
    generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateExpression(e);
    Assert.Equal(1, callCount);
}

[Fact]
public void GenerateExpression_CodeTypeReferenceExpression_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeTypeReferenceExpression();
    int callCount = 0;
    generator.GenerateTypeReferenceExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateExpression(e);
    Assert.Equal(1, callCount);
}

[Fact]
public void GenerateExpression_CodeVariableReferenceExpression_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeVariableReferenceExpression();
    int callCount = 0;
    generator.GenerateVariableReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateExpression(e);
    Assert.Equal(1, callCount);
}

[Fact]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
public void GenerateExpression_NullE_ThrowsArgumentNullException()
{
    CodeGeneratorTests generator = this;
    Assert.Throws<ArgumentNullException>("e", () => generator.GenerateExpression(null));
}

public static IEnumerable<object[]> GenerateExpression_InvalidE_TestData()
{
    yield return new object[] { new CodeExpression() };
    yield return new object[] { new CustomCodeExpression() };
}

[Theory]
[MemberData(nameof(GenerateExpression_InvalidE_TestData))]
public void GenerateExpression_InvalidE_ThrowsArgumentException(CodeExpression e)
{
    CodeGeneratorTests generator = this;
    Assert.Throws<ArgumentException>("e", () => generator.GenerateExpression(e));
}

[Fact]
public void GenerateNamespace_InvokeEmpty_Success()
{
    CodeGeneratorTests generator = this;
    PerformActionWithOutput(writer =>
    {
        var e = new CodeNamespace();
        int generateNamespaceStartCallCount = 0;
        int generateNamespaceEndCallCount = 0;
        generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE);
        generator.GenerateNamespaceStartAction = (actualE) =>
        {
            Assert.Same(e, actualE);
            Assert.Equal(0, generateNamespaceEndCallCount);
            writer.Write("NamespaceStart ");
            generateNamespaceStartCallCount++;
        };
        generator.GenerateNamespaceEndAction = (actualE) =>
        {
            Assert.Same(e, actualE);
            writer.Write("NamespaceEnd");
            generateNamespaceEndCallCount++;
        };
        generator.GenerateNamespace(e);
        Assert.Equal(1, generateNamespaceStartCallCount);
        Assert.Equal(1, generateNamespaceEndCallCount);
        Assert.Equal($"NamespaceStart {Environment.NewLine}NamespaceEnd", writer.ToString());
    });
}

[Fact]
public void
GenerateNamespace_InvokeWithComments_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Comments.Add(new CodeCommentStatement("Comment")); e.Comments.Add(new CodeCommentStatement("Comment")); int generateCommentStatementsCallCount = 0; int generateCommentCallCount = 0; int generateNamespaceStartCallCount = 0; int generateNamespaceEndCallCount = 0; generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e.Comments, actualE); Assert.Equal(0, generateNamespaceStartCallCount); Assert.Equal(0, generateNamespaceEndCallCount); writer.Write("Comments "); generateCommentStatementsCallCount++; }; generator.GenerateCommentAction = (actualE) => { Assert.Same(e.Comments[generateCommentCallCount].Comment, actualE); writer.Write("Comment "); generateCommentCallCount++; }; generator.GenerateNamespaceStartAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generateNamespaceEndCallCount); writer.Write("NamespaceStart "); generateNamespaceStartCallCount++; }; generator.GenerateNamespaceEndAction = (actualE) => { Assert.Same(e, actualE); writer.Write("NamespaceEnd"); generateNamespaceEndCallCount++; }; generator.GenerateNamespace(e); Assert.Equal(1, generateCommentStatementsCallCount); Assert.Equal(2, generateCommentCallCount); Assert.Equal(1, generateNamespaceStartCallCount); Assert.Equal(1, generateNamespaceEndCallCount); Assert.Equal($"Comment Comment Comments NamespaceStart {Environment.NewLine}NamespaceEnd", writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateNamespace_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateNamespace(null)); } [Fact] public void GenerateNamespace_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => {}; generator.GenerateNamespaceEndAction = (actualE) => {}; Assert.Throws<NullReferenceException>(() => generator.GenerateNamespace(e)); } [Fact] public void GenerateNamespace_NullValueInE_ThrowsArgumentException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Comments.Add(new CodeCommentStatement()); generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => {}; generator.GenerateNamespaceEndAction = (actualE) => {}; Assert.Throws<ArgumentException>("e", () => generator.GenerateNamespace(e)); }); } [Fact] public void GenerateNamespaceImports_InvokeEmptyWithOutput_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => generateNamespaceCallCount++; generator.GenerateNamespaceImports(e); Assert.Equal(0, generateNamespaceCallCount); Assert.Empty(writer.ToString()); }); } 
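// The following test checks that a non-empty Imports collection is visited in order,
// with GenerateNamespaceImport invoked once per import.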
[Fact] public void GenerateNamespaceImports_InvokeNonEmptyWithOutput_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Imports.Add(new CodeNamespaceImport("Namespace1")); e.Imports.Add(new CodeNamespaceImport("Namespace2")); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => { Assert.Same(e.Imports[generateNamespaceCallCount], actualE); generateNamespaceCallCount++; }; generator.GenerateNamespaceImports(e); Assert.Equal(2, generateNamespaceCallCount); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateNamespaceImports_InvokeWithOutputWithLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Imports.Add(new CodeNamespaceImport("Namespace1") { LinePragma = new CodeLinePragma() }); e.Imports.Add(new CodeNamespaceImport("Namespace2") { LinePragma = new CodeLinePragma() }); int generateLinePragmaStartCallCount = 0; int generateNamespaceCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(e.Imports[generateLinePragmaEndCallCount].LinePragma, actualE); Assert.Equal(generateLinePragmaStartCallCount, generateNamespaceCallCount); Assert.Equal(generateLinePragmaStartCallCount, generateLinePragmaEndCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateNamespaceImportAction = (actualE) => { Assert.Same(e.Imports[generateLinePragmaEndCallCount], actualE); Assert.Equal(generateNamespaceCallCount, generateLinePragmaEndCallCount); writer.Write("Namespace "); generateNamespaceCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(e.Imports[generateLinePragmaEndCallCount].LinePragma, actualE); writer.Write("LinePragmaEnd"); generateLinePragmaEndCallCount++; }; generator.GenerateNamespaceImports(e); Assert.Equal(2, generateLinePragmaStartCallCount); Assert.Equal(2, generateNamespaceCallCount); Assert.Equal(2, generateLinePragmaEndCallCount); Assert.Equal("LinePragmaStart Namespace LinePragmaEndLinePragmaStart Namespace LinePragmaEnd", writer.ToString()); }); } [Fact] public void GenerateNamespaceImports_InvokeEmptyWithoutOutput_Success() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => generateNamespaceCallCount++; generator.GenerateNamespaceImports(e); Assert.Equal(0, generateNamespaceCallCount); } [Fact] public void GenerateNamespaceImports_InvokeWithoutOutput_Success() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); e.Imports.Add(new CodeNamespaceImport("Namespace1")); e.Imports.Add(new CodeNamespaceImport("Namespace2")); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => { Assert.Same(e.Imports[generateNamespaceCallCount], actualE); generateNamespaceCallCount++; }; generator.GenerateNamespaceImports(e); Assert.Equal(2, generateNamespaceCallCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateNamespaceImports_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateNamespaceImports(null)); } public static IEnumerable<object[]> GenerateParameterDeclarationExpression_TestData() { yield return new object[] { null, null, FieldDirection.In, "Type " }; yield 
return new object[] { string.Empty, string.Empty, FieldDirection.In, "Type " }; yield return new object[] { "type", "name", FieldDirection.In, "Type name" }; yield return new object[] { null, null, FieldDirection.Out, "out Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.Out, "out Type " }; yield return new object[] { "type", "name", FieldDirection.Out, "out Type name" }; yield return new object[] { null, null, FieldDirection.Ref, "ref Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.Ref, "ref Type " }; yield return new object[] { "type", "name", FieldDirection.Ref, "ref Type name" }; yield return new object[] { null, null, FieldDirection.In - 1, "Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.In - 1, "Type " }; yield return new object[] { "type", "name", FieldDirection.In - 1, "Type name" }; yield return new object[] { null, null, FieldDirection.Ref + 1, "Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.Ref + 1, "Type " }; yield return new object[] { "type", "name", FieldDirection.Ref + 1, "Type name" }; } [Theory] [MemberData(nameof(GenerateParameterDeclarationExpression_TestData))] public void GenerateParameterDeclarationExpression_Invoke_Success(string type, string name, FieldDirection direction, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeParameterDeclarationExpression(type, name) { Direction = direction }; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputDirectionCallCount = 0; int outputTypeNamePairCallCount = 0; int outputTypeCallCount = 0; generator.OutputDirectionAction = (actualDirection, baseMethod) => { baseMethod(actualDirection); Assert.Equal(e.Direction, actualDirection); Assert.Equal(0, outputTypeNamePairCallCount); Assert.Equal(0, outputTypeCallCount); outputDirectionCallCount++; }; generator.OutputTypeNamePairAction = (actualType, actualName, baseMethod) => { baseMethod(actualType, actualName); Assert.Same(e.Type, actualType); Assert.Same(e.Name, actualName); outputTypeNamePairCallCount++; }; generator.OutputTypeAction = (actualType) => { Assert.Same(e.Type, actualType); writer.Write("Type"); outputTypeCallCount++; }; generator.OutputIdentifierAction = (actualIdent, baseMethod) => baseMethod(actualIdent); generator.GenerateParameterDeclarationExpression(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(1, outputDirectionCallCount); Assert.Equal(1, outputTypeNamePairCallCount); Assert.Equal(1, outputTypeCallCount); }); } [Fact] public void GenerateParameterDeclarationExpression_InvokeWithCustomAttributes_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeParameterDeclarationExpression("Type", "Name") { Direction = FieldDirection.Ref }; e.CustomAttributes.Add(new CodeAttributeDeclaration("name")); e.CustomAttributes.Add(new CodeAttributeDeclaration("name")); generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputAttributeDeclarationsCallCount = 0; int outputDirectionCallCount = 0; int outputTypeNamePairCallCount = 0; int outputTypeCallCount = 0; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => { baseMethod(actualAttributes); Assert.Same(e.CustomAttributes, actualAttributes); Assert.Equal(0, outputDirectionCallCount); Assert.Equal(0, outputTypeNamePairCallCount); 
Assert.Equal(0, outputTypeCallCount); outputAttributeDeclarationsCallCount++; }; generator.GenerateAttributeDeclarationsStartAction = (actualArg) => writer.Write("StartAttributes "); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GenerateAttributeDeclarationsEndAction = (actualArg) => writer.Write(" EndAttributes"); generator.OutputDirectionAction = (actualDirection, baseMethod) => { baseMethod(actualDirection); Assert.Equal(e.Direction, actualDirection); Assert.Equal(0, outputTypeNamePairCallCount); Assert.Equal(0, outputTypeCallCount); outputDirectionCallCount++; }; generator.OutputTypeNamePairAction = (actualType, actualName, baseMethod) => { baseMethod(actualType, actualName); Assert.Same(e.Type, actualType); Assert.Same(e.Name, actualName); outputTypeNamePairCallCount++; }; generator.OutputTypeAction = (actualType) => { Assert.Same(e.Type, actualType); writer.Write("Type"); outputTypeCallCount++; }; generator.OutputIdentifierAction = (actualIdent, baseMethod) => baseMethod(actualIdent); generator.GenerateParameterDeclarationExpression(e); Assert.Equal($"StartAttributes name(), {Environment.NewLine}name() EndAttributes ref Type Name", writer.ToString()); Assert.Equal(1, outputAttributeDeclarationsCallCount); Assert.Equal(1, outputDirectionCallCount); Assert.Equal(1, outputTypeNamePairCallCount); Assert.Equal(1, outputTypeCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateParameterDeclarationExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateParameterDeclarationExpression(null)); } [Fact] public void GenerateParameterDeclarationExpression_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeParameterDeclarationExpression(); generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputTypeNamePairAction = (actualType, actualName, baseMethod) => baseMethod(actualType, actualName); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputTypeAction = (actualType) => { }; Assert.Throws<NullReferenceException>(() => generator.GenerateParameterDeclarationExpression(e)); } public static IEnumerable<object[]> GeneratePrimitiveExpression_TestData() { yield return new object[] { null, "NullToken" }; yield return new object[] { 'a', "'a'" }; yield return new object[] { (short)1, "1" }; yield return new object[] { 1, "1" }; yield return new object[] { (long)1, "1" }; yield return new object[] { (byte)1, "1" }; yield return new object[] { true, "true" }; yield return new object[] { false, "false" }; } [Theory] [MemberData(nameof(GeneratePrimitiveExpression_TestData))] public void GeneratePrimitiveExpression_Invoke_Success(object value, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodePrimitiveExpression(value); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GeneratePrimitiveExpression(e); Assert.Equal(expected, writer.ToString()); }); } [Fact] public void GeneratePrimitiveExpression_InvokeFloat_Success() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression((float)1); 
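// float primitives are not written directly; GeneratePrimitiveExpression is expected
// to route them through GenerateSingleFloatValue.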
generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int generateSingleFloatValueCallCount = 0; generator.GenerateSingleFloatValueAction = (actualValue, baseMethod) => { Assert.Equal((float)1, actualValue); generateSingleFloatValueCallCount++; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(1, generateSingleFloatValueCallCount); } [Fact] public void GeneratePrimitiveExpression_InvokeDouble_Success() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression((double)1); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int generateDoubleValueCallCount = 0; generator.GenerateDoubleValueAction = (actualValue, baseMethod) => { Assert.Equal((double)1, actualValue); generateDoubleValueCallCount++; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(1, generateDoubleValueCallCount); } [Fact] public void GeneratePrimitiveExpression_InvokeDecimal_Success() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression((decimal)1); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int generateDecimalValueCallCount = 0; generator.GenerateDecimalValueAction = (actualValue, baseMethod) => { Assert.Equal((decimal)1, actualValue); generateDecimalValueCallCount++; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(1, generateDecimalValueCallCount); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("result", "result")] public void GeneratePrimitiveExpression_InvokeString_Success(string result, string expected) { CodeGeneratorTests generator = this; generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); PerformActionWithOutput(writer => { var e = new CodePrimitiveExpression("value"); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int quoteSnippetCallCount = 0; generator.QuoteSnippetStringAction = (actualValue) => { Assert.Equal("value", actualValue); quoteSnippetCallCount++; return result; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(1, quoteSnippetCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GeneratePrimitiveExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GeneratePrimitiveExpression(null)); } public static IEnumerable<object[]> GeneratePrimitiveExpression_InvalidEValue_TestData() { yield return new object[] { new object() }; yield return new object[] { DBNull.Value }; yield return new object[] { new DateTime() }; yield return new object[] { (sbyte)1 }; yield return new object[] { (ushort)1 }; yield return new object[] { (uint)1 }; yield return new object[] { (ulong)1 }; } [Theory] [MemberData(nameof(GeneratePrimitiveExpression_InvalidEValue_TestData))] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GeneratePrimitiveExpression_InvalidE_ThrowsArgumentException(object value) { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression(value); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentException>("e", () => generator.GeneratePrimitiveExpression(e)); } public static IEnumerable<object[]> 
GeneratePrimitiveExpression_WithoutOutput_TestData()
{
    yield return new object[] { null };
    yield return new object[] { "" };
    yield return new object[] { "value" };
    yield return new object[] { 'a' };
    yield return new object[] { (short)1 };
    yield return new object[] { 1 };
    yield return new object[] { (long)1 };
    yield return new object[] { (byte)1 };
    yield return new object[] { (float)1 };
    yield return new object[] { (double)1 };
    yield return new object[] { (decimal)1 };
    yield return new object[] { true };
    yield return new object[] { false };
}

[Theory]
[MemberData(nameof(GeneratePrimitiveExpression_WithoutOutput_TestData))]
public void GeneratePrimitiveExpression_InvokeWithoutOutput_ThrowsNullReferenceException(object value)
{
    CodeGeneratorTests generator = this;
    var e = new CodePrimitiveExpression(value);
    generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE);
    generator.GenerateSingleFloatValueAction = (actualS, baseMethod) => baseMethod(actualS);
    generator.GenerateDoubleValueAction = (actualD, baseMethod) => baseMethod(actualD);
    generator.GenerateDecimalValueAction = (actualD, baseMethod) => baseMethod(actualD);
    generator.QuoteSnippetStringAction = (actualValue) => actualValue;
    Assert.Throws<NullReferenceException>(() => generator.GeneratePrimitiveExpression(e));
}

[Fact]
public void GenerateSingleFloatValue_Invoke_Success()
{
    CodeGeneratorTests generator = this;
    PerformActionWithOutput(writer =>
    {
        generator.GenerateSingleFloatValueAction = (actualS, baseMethod) => baseMethod(actualS);
        generator.GenerateSingleFloatValue(float.MaxValue);
        Assert.Equal(float.MaxValue.ToString("R", CultureInfo.InvariantCulture.NumberFormat), writer.ToString());
    });
}

[Fact]
public void GenerateSingleFloatValue_InvokeWithoutWriter_ThrowsNullReferenceException()
{
    CodeGeneratorTests generator = this;
    generator.GenerateSingleFloatValueAction = (actualS, baseMethod) => baseMethod(actualS);
    Assert.Throws<NullReferenceException>(() => generator.GenerateSingleFloatValue(1));
}

[Theory]
[InlineData(null, "")]
[InlineData("", "")]
[InlineData("value", "value")]
public void GenerateSnippetCompileUnit_Invoke_Success(string value, string expected)
{
    CodeGeneratorTests generator = this;
    var e = new CodeSnippetCompileUnit(value);
    PerformActionWithOutput(writer =>
    {
        generator.GenerateSnippetCompileUnit(e);
        Assert.Equal(expected + Environment.NewLine, writer.ToString());
    });
}

[Fact]
public void GenerateSnippetCompileUnit_InvokeWithDirectivesAndLinePragma_Success()
{
    CodeGeneratorTests generator = this;
    PerformActionWithOutput(writer =>
    {
        var e = new CodeSnippetCompileUnit("value") { LinePragma = new CodeLinePragma() };
        e.StartDirectives.Add(new CodeDirective());
        e.StartDirectives.Add(new CodeChecksumPragma());
        e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0]));
        e.EndDirectives.Add(new CodeDirective());
        e.EndDirectives.Add(new CodeChecksumPragma());
        e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0]));
        int generateLinePragmaStartCallCount = 0;
        int generateDirectivesCallCount = 0;
        int generateLinePragmaEndCallCount = 0;
        generator.GenerateLinePragmaStartAction = (actualE) =>
        {
            Assert.Same(e.LinePragma, actualE);
            Assert.Equal(0, generateLinePragmaEndCallCount);
            Assert.Equal(1, generateDirectivesCallCount);
            writer.Write("LinePragmaStart ");
            generateLinePragmaStartCallCount++;
        };
        generator.GenerateLinePragmaEndAction = (actualE) =>
        {
            Assert.Same(e.LinePragma, actualE);
            Assert.Equal(1, generateDirectivesCallCount);
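// At this point only the StartDirectives pass has run; EndDirectives are generated
// after the line pragma is closed.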
writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? e.StartDirectives : e.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? "StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateSnippetCompileUnit(e); Assert.Equal($"StartDirectives LinePragmaStart value{Environment.NewLine}LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateSnippetCompileUnit_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateSnippetCompileUnit(null)); } [Fact] public void GenerateSnippetCompileUnit_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeSnippetCompileUnit(); Assert.Throws<NullReferenceException>(() => generator.GenerateSnippetCompileUnit(e)); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("value", "value")] public void GenerateSnippetStatement_Invoke_Success(string value, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateSnippetStatementAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeSnippetStatement(value); generator.GenerateSnippetStatement(e); Assert.Equal(expected + Environment.NewLine, writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateSnippetStatement_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateSnippetStatementAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateSnippetStatement(null)); } [Fact] public void GenerateStatement_InvokeWithDirectivesAndLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeGotoStatement { LinePragma = new CodeLinePragma() }; e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); int generateLinePragmaStartCallCount = 0; int generateDirectivesCallCount = 0; int generateStatementCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(e.LinePragma, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateStatementCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateGotoStatementAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("Statement "); generateStatementCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(e.LinePragma, actualE); Assert.Equal(1, 
generateDirectivesCallCount); writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? e.StartDirectives : e.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? "StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateStatement(e); Assert.Equal($"StartDirectives LinePragmaStart Statement LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(1, generateStatementCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); }); } [Fact] public void GenerateStatement_CodeAssignStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeAssignStatement(); int callCount = 0; generator.GenerateAssignStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeAttachEventStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeAttachEventStatement(); int callCount = 0; generator.GenerateAttachEventStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeConditionStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeConditionStatement(); int callCount = 0; generator.GenerateConditionStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeExpressionStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeExpressionStatement(); int callCount = 0; generator.GenerateExpressionStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeGotoStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeGotoStatement(); int callCount = 0; generator.GenerateGotoStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeIterationStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeIterationStatement(); int callCount = 0; generator.GenerateIterationStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeLabeledStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeLabeledStatement(); int callCount = 0; generator.GenerateLabeledStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeMethodReturnStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeMethodReturnStatement(); int callCount = 0; generator.GenerateMethodReturnStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeRemoveEventStatement_CallsCorrectMethod() { 
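// Verifies that GenerateStatement dispatches a CodeRemoveEventStatement to
// GenerateRemoveEventStatement with the same instance.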
CodeGeneratorTests generator = this;
var e = new CodeRemoveEventStatement();
int callCount = 0;
generator.GenerateRemoveEventStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
generator.GenerateStatement(e);
Assert.Equal(1, callCount);
}

[Fact]
public void GenerateStatement_CodeSnippetStatement_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    PerformActionWithOutput(writer =>
    {
        generator.Indent = 1;
        var e = new CodeSnippetStatement();
        int callCount = 0;
        generator.GenerateSnippetStatementAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; };
        generator.GenerateStatement(e);
        Assert.Equal(1, callCount);
        Assert.Equal(1, generator.Indent);
    });
}

[Fact]
public void GenerateStatement_CodeThrowExceptionStatement_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeThrowExceptionStatement();
    int callCount = 0;
    generator.GenerateThrowExceptionStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateStatement(e);
    Assert.Equal(1, callCount);
}

[Fact]
public void GenerateStatement_CodeTryCatchFinallyStatement_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeTryCatchFinallyStatement();
    int callCount = 0;
    generator.GenerateTryCatchFinallyStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateStatement(e);
    Assert.Equal(1, callCount);
}

[Fact]
public void GenerateStatement_CodeVariableDeclarationStatement_CallsCorrectMethod()
{
    CodeGeneratorTests generator = this;
    var e = new CodeVariableDeclarationStatement();
    int callCount = 0;
    generator.GenerateVariableDeclarationStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; };
    generator.GenerateStatement(e);
    Assert.Equal(1, callCount);
}

[Fact]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
public void GenerateStatement_NullE_ThrowsArgumentNullException()
{
    CodeGeneratorTests generator = this;
    Assert.Throws<ArgumentNullException>("e", () => generator.GenerateStatement(null));
}

public static IEnumerable<object[]> GenerateStatement_InvalidE_TestData()
{
    yield return new object[] { new CodeStatement() };
    yield return new object[] { new CustomCodeStatement() };
}

[Theory]
[MemberData(nameof(GenerateStatement_InvalidE_TestData))]
public void GenerateStatement_InvalidE_ThrowsArgumentException(CodeStatement e)
{
    CodeGeneratorTests generator = this;
    Assert.Throws<ArgumentException>("e", () => generator.GenerateStatement(e));
}

[Fact]
public void GenerateStatement_CodeSnippetStatementWithoutOutput_ThrowsNullReferenceException()
{
    CodeGeneratorTests generator = this;
    var e = new CodeSnippetStatement();
    generator.GenerateSnippetStatementAction = (actualE, baseMethod) => baseMethod(actualE);
    Assert.Throws<NullReferenceException>(() => generator.GenerateStatement(e));
}

[Fact]
public void GenerateStatements_InvokeWithWriter_Success()
{
    CodeGeneratorTests generator = this;
    PerformActionWithOutput(writer =>
    {
        var stmts = new CodeStatementCollection(new CodeStatement[] { new CodeGotoStatement(), new CodeGotoStatement() });
        int generateStatementCallCount = 0;
        generator.GenerateGotoStatementAction = (actualE) => { Assert.Same(stmts[generateStatementCallCount], actualE); generateStatementCallCount++; };
        generator.GenerateStatements(stmts);
        Assert.Equal(2, generateStatementCallCount);
    });
}

[Fact]
public void GenerateStatements_InvokeEmptyStatementsWithWriter_Success()
{
    CodeGeneratorTests generator = this;
    PerformActionWithOutput(writer =>
    {
        var stmts
= new CodeStatementCollection(); generator.GenerateStatements(stmts); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateStatements_InvokeEmptyStatementsWithoutWriter_Nop() { CodeGeneratorTests generator = this; var stmts = new CodeStatementCollection(); generator.GenerateStatements(stmts); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateStatements_NullStmts_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("stmts", () => generator.GenerateStatements(null)); } [Fact] public void GenerateStatements_InvalidStatementInStmts_ThrowsArgumentException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var stmts = new CodeStatementCollection(new CodeStatement[] { new CodeStatement() }); Assert.Throws<ArgumentException>("e", () => generator.GenerateStatements(stmts)); }); } [Fact] public void GenerateStatements_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var stmts = new CodeStatementCollection(new CodeStatement[] { new CodeStatement() }); Assert.Throws<NullReferenceException>(() => generator.GenerateStatements(stmts)); } [Fact] public void GenerateTypeOfExpression_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeTypeOfExpression(new CodeTypeReference()); generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputTypeCallCount = 0; generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(e.Type, actualTypeRef); writer.Write("Type"); outputTypeCallCount++; }; generator.GenerateTypeOfExpression(e); Assert.Equal("typeof(Type)", writer.ToString()); Assert.Equal(1, outputTypeCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateTypeOfExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateTypeOfExpression(null)); } [Fact] public void GenerateTypeOfExpression_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeTypeOfExpression(); generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateTypeOfExpression(e)); } [Fact] public void GenerateTypeReferenceExpression_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeTypeReferenceExpression(new CodeTypeReference()); generator.GenerateTypeReferenceExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputTypeCallCount = 0; generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(e.Type, actualTypeRef); writer.Write("Type"); outputTypeCallCount++; }; generator.GenerateTypeReferenceExpression(e); Assert.Equal("Type", writer.ToString()); Assert.Equal(1, outputTypeCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateTypeReferenceExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateTypeReferenceExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateTypeReferenceExpression(null)); } 
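// GenerateTypes test data: with default options a blank line is emitted between types;
// setting BlankLinesBetweenMembers to false suppresses it.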
public static IEnumerable<object[]> GenerateTypes_TestData() { yield return new object[] { new CodeTypeDeclaration(), null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration(), new CodeGeneratorOptions(), $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration(), new CodeGeneratorOptions { BlankLinesBetweenMembers = false }, $"TypeStart TypeEndTypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsClass = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsEnum = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsInterface = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsStruct = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; } [Theory] [MemberData(nameof(GenerateTypes_TestData))] public void GenerateTypes_InvokeClassWithWriter_Success(CodeTypeDeclaration type, CodeGeneratorOptions options, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Types.Add(new CodeTypeDeclaration()); e.Types.Add(type); int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(e.Types[generateTypeStartCallCount], actualE); Assert.Equal(generateTypeStartCallCount, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(e.Types[generateTypeEndCallCount], actualE); writer.Write("TypeEnd"); generateTypeEndCallCount++; }; generator.GenerateTypes(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(2, generateTypeStartCallCount); Assert.Equal(2, generateTypeEndCallCount); Assert.Same(e.Types[1], generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Same(e.Types[1].Name, generator.CurrentTypeName); Assert.Equal(e.Types[1].IsClass, generator.IsCurrentClass); Assert.False(generator.IsCurrentDelegate); Assert.Equal(e.Types[1].IsEnum, generator.IsCurrentEnum); Assert.Equal(e.Types[1].IsInterface, generator.IsCurrentInterface); Assert.Equal(e.Types[1].IsStruct, generator.IsCurrentStruct); }, options); } [Fact] public void GenerateTypes_InvokeDelegateWithWriter_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Types.Add(new CodeTypeDeclaration()); e.Types.Add(new CodeTypeDelegate("name")); int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(e.Types[generateTypeStartCallCount], actualE); Assert.Equal(generateTypeStartCallCount, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(e.Types[generateTypeEndCallCount], actualE); writer.Write("TypeEnd"); generateTypeEndCallCount++; }; generator.GenerateTypes(e); Assert.Equal($"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd", 
writer.ToString()); Assert.Equal(2, generateTypeStartCallCount); Assert.Equal(2, generateTypeEndCallCount); Assert.Same(e.Types[1], generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Same(e.Types[1].Name, generator.CurrentTypeName); Assert.False(generator.IsCurrentClass); Assert.True(generator.IsCurrentDelegate); Assert.False(generator.IsCurrentEnum); Assert.False(generator.IsCurrentInterface); Assert.False(generator.IsCurrentStruct); }); } [Fact] public void GenerateTypes_InvokeWithCommentsDirectivesAndLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var type = new CodeTypeDeclaration { LinePragma = new CodeLinePragma() }; type.Comments.Add(new CodeCommentStatement("Comment")); type.Comments.Add(new CodeCommentStatement("Comment")); type.StartDirectives.Add(new CodeDirective()); type.StartDirectives.Add(new CodeChecksumPragma()); type.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); type.EndDirectives.Add(new CodeDirective()); type.EndDirectives.Add(new CodeChecksumPragma()); type.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); var e = new CodeNamespace(); e.Types.Add(type); int generateCommentStatementsCallCount = 0; int generateCommentCallCount = 0; int generateLinePragmaStartCallCount = 0; int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; int generateDirectivesCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(type.Comments, actualE); writer.Write("Comments "); Assert.Equal(0, generateLinePragmaStartCallCount); Assert.Equal(0, generateTypeStartCallCount); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateTypeEndCallCount); generateCommentStatementsCallCount++; }; generator.GenerateCommentAction = (actualE) => { Assert.Same(type.Comments[generateCommentCallCount].Comment, actualE); writer.Write("Comment "); generateCommentCallCount++; }; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(type.LinePragma, actualE); Assert.Equal(0, generateTypeStartCallCount); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateTypeEndCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(type, actualE); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(type, actualE); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); writer.Write("TypeEnd "); generateTypeEndCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(type.LinePragma, actualE); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(1, generateTypeEndCallCount); writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? type.StartDirectives : type.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? 
"StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateTypes(e); Assert.Equal($"{Environment.NewLine}StartDirectives Comment Comment Comments LinePragmaStart TypeStart TypeEnd LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateCommentStatementsCallCount); Assert.Equal(2, generateCommentCallCount); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(1, generateTypeStartCallCount); Assert.Equal(1, generateTypeEndCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Same(type, generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Same(type.Name, generator.CurrentTypeName); Assert.Equal(type.IsClass, generator.IsCurrentClass); Assert.False(generator.IsCurrentDelegate); Assert.Equal(type.IsEnum, generator.IsCurrentEnum); Assert.Equal(type.IsInterface, generator.IsCurrentInterface); Assert.Equal(type.IsStruct, generator.IsCurrentStruct); }); } [Fact] public void GenerateTypes_InvokeWithMembers_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var type = new CodeTypeDeclaration(); type.Members.Add(new CodeTypeMember()); var e = new CodeNamespace(); e.Types.Add(type); int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(type, actualE); Assert.Equal(0, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(type, actualE); writer.Write("TypeEnd"); generateTypeEndCallCount++; }; generator.GenerateTypes(e); Assert.Equal($"{Environment.NewLine}TypeStart TypeEnd", writer.ToString()); Assert.Equal(1, generateTypeStartCallCount); Assert.Equal(1, generateTypeEndCallCount); Assert.Same(type, generator.CurrentClass); }); } [Fact] public void GenerateTypes_InvokeEmpty_Nop() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); generator.GenerateTypes(e); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateTypes_InvokeEmptyEWithoutWriter_Nop() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); generator.GenerateTypes(e); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateTypes_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateTypes(null)); } [Fact] public void GenerateTypes_NullValueInE_ThrowsArgumentException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var type = new CodeTypeDeclaration(); type.Comments.Add(new CodeCommentStatement()); var e = new CodeNamespace(); e.Types.Add(type); generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => {}; generator.GenerateNamespaceEndAction = (actualE) => {}; Assert.Throws<ArgumentException>("e", () => generator.GenerateTypes(e)); }); } [Theory] [InlineData(null, false)] [InlineData("", false)] [InlineData(" ", false)] [InlineData("a", true)] [InlineData("A", true)] [InlineData("\u01C5", true)] [InlineData("\u02B0", true)] [InlineData("\u2163", true)] [InlineData("\u0620", true)] [InlineData("_", true)] 
[InlineData("_aA\u01C5\u02B0\u2163\u0620_0", true)] [InlineData("aA\u01C5\u02B0\u2163\u0620_0", true)] [InlineData(" ", false)] [InlineData("a ", false)] [InlineData("#", false)] [InlineData("a#", false)] [InlineData("\u0300", false)] [InlineData("a\u0300", true)] [InlineData("\u0903", false)] [InlineData("a\u0903", true)] [InlineData("\u203F", false)] [InlineData("a\u203F", true)] [InlineData("0", false)] [InlineData("1", false)] [InlineData(":", false)] [InlineData(".", false)] [InlineData("$", false)] [InlineData("+", false)] [InlineData("<", false)] [InlineData(">", false)] [InlineData("-", false)] [InlineData("[", false)] [InlineData("]", false)] [InlineData(",", false)] [InlineData("&", false)] [InlineData("*", false)] [InlineData("`", false)] [InlineData("a0", true)] [InlineData("a1", true)] [InlineData("a:", false)] [InlineData("a.", false)] [InlineData("a$", false)] [InlineData("a+", false)] [InlineData("a<", false)] [InlineData("a>", false)] [InlineData("a-", false)] [InlineData("a[", false)] [InlineData("a]", false)] [InlineData("a,", false)] [InlineData("a&", false)] [InlineData("a*", false)] [InlineData("\0", false)] [InlineData("a\0", false)] [InlineData("\r", false)] [InlineData("a\r", false)] [InlineData("\n", false)] [InlineData("a\n", false)] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void IsValidLanguageIndependentIdentifier_Invoke_ReturnsExpected(string value, bool expected) { Assert.Equal(expected, CodeGenerator.IsValidLanguageIndependentIdentifier(value)); } [Theory] [InlineData(null, "1")] [InlineData("", "1")] [InlineData("name", "name=1")] public void OutputAttributeArgument_Invoke_Success(string name, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var arg = new CodeAttributeArgument(name, new CodePrimitiveExpression(1)); generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputAttributeArgument(arg); Assert.Equal(expected, writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void OutputAttributeArgument_NullArg_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("arg", () => generator.OutputAttributeArgument(null)); } [Fact] public void OutputAttributeArgument_NullArgValue_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var arg = new CodeAttributeArgument(); generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.OutputAttributeArgument(arg)); }); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("name")] public void 
OutputAttributeArgument_InvokeNonNullNameWithoutOutput_ThrowsNullReferenceException(string name) { CodeGeneratorTests generator = this; var arg = new CodeAttributeArgument(name, new CodePrimitiveExpression(1)); generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.OutputAttributeArgument(arg)); } [Fact] public void OutputAttributeDeclarations_NonEmptyAttributes_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var attributes = new CodeAttributeDeclarationCollection(new CodeAttributeDeclaration[] { new CodeAttributeDeclaration(), new CodeAttributeDeclaration(string.Empty), new CodeAttributeDeclaration("name"), new CodeAttributeDeclaration("name", new CodeAttributeArgument(new CodePrimitiveExpression(1))), new CodeAttributeDeclaration("name", new CodeAttributeArgument("AttributeName", new CodePrimitiveExpression(1))), new CodeAttributeDeclaration("name", new CodeAttributeArgument("AttributeName1", new CodePrimitiveExpression(1)), new CodeAttributeArgument("AttributeName2", new CodePrimitiveExpression(2))) }); int generateAttributeDeclarationsStartCallCount = 0; int generateAttributeDeclarationsEndCallCount = 0; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.GenerateAttributeDeclarationsStartAction = (actualAttributes) => { Assert.Same(attributes, actualAttributes); Assert.Equal(0, generateAttributeDeclarationsEndCallCount); generator.Output.Write("StartAttributes "); generateAttributeDeclarationsStartCallCount++; }; generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateAttributeDeclarationsEndAction = (actualAttributes) => { Assert.Same(attributes, actualAttributes); generator.Output.Write(" EndAttributes"); generateAttributeDeclarationsEndCallCount++; }; generator.OutputAttributeDeclarations(attributes); Assert.Equal(1, generateAttributeDeclarationsStartCallCount); Assert.Equal(1, generateAttributeDeclarationsEndCallCount); Assert.Equal($"StartAttributes (), {Environment.NewLine}(), {Environment.NewLine}name(), {Environment.NewLine}name(1), {Environment.NewLine}name(AttributeName=1), {Environment.NewLine}name(AttributeName1=1, AttributeName2=2) EndAttributes", writer.ToString()); }); } [Fact] public void OutputAttributeDeclarations_InvokeEmptyAttributes_Nop() { CodeGeneratorTests generator = this; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputAttributeDeclarations(new CodeAttributeDeclarationCollection()); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void OutputAttributeDeclarations_NullAttributes_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<ArgumentNullException>("attributes", () => generator.OutputAttributeDeclarations(null)); } [Fact] public void
OutputAttributeDeclarations_InvokeNonEmptyAttributesNoOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var attributes = new CodeAttributeDeclarationCollection(new CodeAttributeDeclaration[] { new CodeAttributeDeclaration(), new CodeAttributeDeclaration(string.Empty), new CodeAttributeDeclaration("name") }); generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.GenerateAttributeDeclarationsStartAction = (actualAttributes) => {}; generator.GenerateAttributeDeclarationsEndAction = (actualAttributes) => {}; Assert.Throws<NullReferenceException>(() => generator.OutputAttributeDeclarations(attributes)); } [Fact] public void OutputAttributeDeclarations_NullArgumentExpressionInAttributes_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var attributes = new CodeAttributeDeclarationCollection(new CodeAttributeDeclaration[] { new CodeAttributeDeclaration("name", new CodeAttributeArgument()) }); generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.GenerateAttributeDeclarationsStartAction = (actualAttributes) => { }; generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GenerateAttributeDeclarationsEndAction = (actualAttributes) => { }; Assert.Throws<ArgumentNullException>("e", () => generator.OutputAttributeDeclarations(attributes)); }); } [Theory] [InlineData(FieldDirection.In, "")] [InlineData(FieldDirection.Out, "out ")] [InlineData(FieldDirection.Ref, "ref ")] [InlineData(FieldDirection.In - 1, "")] [InlineData(FieldDirection.Ref + 1, "")] public void OutputDirection_Invoke_Success(FieldDirection direction, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputDirection(direction); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(FieldDirection.Out)] [InlineData(FieldDirection.Ref)] public void OutputDirection_InvokeWithoutOutput_ThrowsNullReferenceException(FieldDirection direction) { CodeGeneratorTests generator = this; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); Assert.Throws<NullReferenceException>(() => generator.OutputDirection(direction)); } [Theory] [InlineData(FieldDirection.In)] [InlineData(FieldDirection.In - 1)] [InlineData(FieldDirection.Ref + 1)] public void OutputDirection_InvokeWithoutOutputInvalidDirection_Nop(FieldDirection direction) { CodeGeneratorTests generator = this; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputDirection(direction); } [Theory] [InlineData(MemberAttributes.Abstract, "")] [InlineData(MemberAttributes.Final, "")] [InlineData(MemberAttributes.Static, "static ")] [InlineData(MemberAttributes.Override, "")] [InlineData(MemberAttributes.Const, "const ")] [InlineData(MemberAttributes.ScopeMask, "")] [InlineData(MemberAttributes.New, "new ")] [InlineData(MemberAttributes.VTableMask, "")] [InlineData(MemberAttributes.Overloaded, "")] [InlineData(MemberAttributes.Assembly, "")] [InlineData(MemberAttributes.FamilyAndAssembly, "")] [InlineData(MemberAttributes.Family, "")] [InlineData(MemberAttributes.FamilyOrAssembly, "")] [InlineData(MemberAttributes.Private, "")] [InlineData(MemberAttributes.Public, "")]
[InlineData(MemberAttributes.AccessMask, "")] [InlineData(MemberAttributes.New | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.Static | MemberAttributes.Private, "static ")] [InlineData(MemberAttributes.Const | MemberAttributes.Private, "const ")] [InlineData(MemberAttributes.New | MemberAttributes.Static, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Const, "new const ")] public void OutputFieldScopeModifier_Invoke_Success(MemberAttributes attributes, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputFieldScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputFieldScopeModifier(attributes); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(MemberAttributes.Static)] [InlineData(MemberAttributes.Const)] [InlineData(MemberAttributes.New)] [InlineData(MemberAttributes.New | MemberAttributes.Private)] [InlineData(MemberAttributes.Static | MemberAttributes.Private)] [InlineData(MemberAttributes.Const | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Static)] [InlineData(MemberAttributes.New | MemberAttributes.Const)] public void OutputFieldScopeModifier_InvokeWithoutOutput_ThrowsNullReferenceException(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputFieldScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<NullReferenceException>(() => generator.OutputFieldScopeModifier(attributes)); } [Theory] [InlineData(MemberAttributes.Abstract)] [InlineData(MemberAttributes.Final)] [InlineData(MemberAttributes.Override)] [InlineData(MemberAttributes.ScopeMask)] [InlineData(MemberAttributes.VTableMask)] [InlineData(MemberAttributes.Overloaded)] [InlineData(MemberAttributes.Assembly)] [InlineData(MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.Family)] [InlineData(MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.Private)] [InlineData(MemberAttributes.Public)] [InlineData(MemberAttributes.AccessMask)] public void OutputFieldScopeModifier_InvokeWithoutOutputInvalid_Nop(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputFieldScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputFieldScopeModifier(attributes); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("ident", "ident")] public void OutputIdentifier_InvokeWithOutput_Appends(string st, string expected) { CodeGeneratorTests generator = this; generator.PerformActionWithOutput(writer => { generator.OutputIdentifierAction = (actualSt, baseMethod) => baseMethod(actualSt); generator.OutputIdentifier(st); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("ident")] public void OutputIdentifier_InvokeWithoutOutput_ThrowsNullReferenceException(string ident) { CodeGeneratorTests generator = this; generator.OutputIdentifierAction = (actualSt, baseMethod) => baseMethod(actualSt); Assert.Throws<NullReferenceException>(() => generator.OutputIdentifier(ident)); } [Theory] [InlineData(MemberAttributes.Abstract, "")] [InlineData(MemberAttributes.Final, "")] [InlineData(MemberAttributes.Static, "")] [InlineData(MemberAttributes.Override, "")] [InlineData(MemberAttributes.Const, "")] [InlineData(MemberAttributes.ScopeMask, "")] [InlineData(MemberAttributes.New, "")] [InlineData(MemberAttributes.VTableMask, "")] 
[InlineData(MemberAttributes.Overloaded, "")] [InlineData(MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.AccessMask, "")] [InlineData(MemberAttributes.New | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | 
MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public, "public ")] public void OutputMemberAccessModifier_Invoke_Success(MemberAttributes attributes, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputMemberAccessModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberAccessModifier(attributes); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(MemberAttributes.Assembly)] [InlineData(MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.Family)] [InlineData(MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.Private)] [InlineData(MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private)] [InlineData(MemberAttributes.New | 
MemberAttributes.Final | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public)] public void OutputMemberAccessModifier_InvokeWithoutOutput_ThrowsNullReferenceException(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberAccessModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<NullReferenceException>(() => generator.OutputMemberAccessModifier(attributes)); } [Theory] [InlineData(MemberAttributes.Abstract)] [InlineData(MemberAttributes.Final)] [InlineData(MemberAttributes.Static)] [InlineData(MemberAttributes.Override)] [InlineData(MemberAttributes.Const)] [InlineData(MemberAttributes.ScopeMask)] [InlineData(MemberAttributes.New)] [InlineData(MemberAttributes.VTableMask)] [InlineData(MemberAttributes.Overloaded)] [InlineData(MemberAttributes.AccessMask)] public void OutputMemberAccessModifier_InvokeWithoutOutputInvalid_Nop(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberAccessModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberAccessModifier(attributes); } [Theory] [InlineData(MemberAttributes.Abstract, "abstract ")] [InlineData(MemberAttributes.Final, "")] [InlineData(MemberAttributes.Static, "static ")] [InlineData(MemberAttributes.Override, "override ")] [InlineData(MemberAttributes.Const, "")] [InlineData(MemberAttributes.ScopeMask, "")] [InlineData(MemberAttributes.New, "new ")] [InlineData(MemberAttributes.VTableMask, "")] [InlineData(MemberAttributes.Overloaded, "")] [InlineData(MemberAttributes.Assembly, "")] [InlineData(MemberAttributes.FamilyAndAssembly, "")] [InlineData(MemberAttributes.Family, "virtual ")] [InlineData(MemberAttributes.FamilyOrAssembly, "")] [InlineData(MemberAttributes.Private, "")] [InlineData(MemberAttributes.Public, "virtual ")] [InlineData(MemberAttributes.AccessMask, "")] [InlineData(MemberAttributes.New | MemberAttributes.Assembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Family, "new virtual ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyOrAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Public, "new virtual ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly, "new ")] 
[InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family, "new virtual ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public, "new virtual ")] public void OutputMemberScopeModifier_Invoke_Success(MemberAttributes attributes, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputMemberScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberScopeModifier(attributes); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(MemberAttributes.Abstract)] [InlineData(MemberAttributes.Final)] [InlineData(MemberAttributes.Static)] [InlineData(MemberAttributes.Override)] [InlineData(MemberAttributes.New)] [InlineData(MemberAttributes.Family)] [InlineData(MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Family)] [InlineData(MemberAttributes.New | 
MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public)] public void OutputMemberScopeModifier_InvokeWithoutOutput_ThrowsNullReferenceException(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<NullReferenceException>(() => generator.OutputMemberScopeModifier(attributes)); } [Theory] [InlineData(MemberAttributes.Const)] [InlineData(MemberAttributes.ScopeMask)] [InlineData(MemberAttributes.VTableMask)] [InlineData(MemberAttributes.Overloaded)] [InlineData(MemberAttributes.Assembly)] [InlineData(MemberAttributes.FamilyAndAssembly)] 
[InlineData(MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.Private)] [InlineData(MemberAttributes.AccessMask)] public void OutputMemberScopeModifier_InvokeWithoutOutputInvalid_Nop(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberScopeModifier(attributes); } [Theory] [InlineData(CodeBinaryOperatorType.Add, "+")] [InlineData(CodeBinaryOperatorType.Assign, "=")] [InlineData(CodeBinaryOperatorType.BitwiseAnd, "&")] [InlineData(CodeBinaryOperatorType.BitwiseOr, "|")] [InlineData(CodeBinaryOperatorType.BooleanAnd, "&&")] [InlineData(CodeBinaryOperatorType.BooleanOr, "||")] [InlineData(CodeBinaryOperatorType.Divide, "/")] [InlineData(CodeBinaryOperatorType.GreaterThan, ">")] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual, ">=")] [InlineData(CodeBinaryOperatorType.IdentityEquality, "==")] [InlineData(CodeBinaryOperatorType.IdentityInequality, "!=")] [InlineData(CodeBinaryOperatorType.LessThan, "<")] [InlineData(CodeBinaryOperatorType.LessThanOrEqual, "<=")] [InlineData(CodeBinaryOperatorType.Modulus, "%")] [InlineData(CodeBinaryOperatorType.Multiply, "*")] [InlineData(CodeBinaryOperatorType.Subtract, "-")] [InlineData(CodeBinaryOperatorType.ValueEquality, "==")] [InlineData(CodeBinaryOperatorType.Add - 1, "")] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual + 1, "")] public void OutputOperator_Invoke_Success(CodeBinaryOperatorType op, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.OutputOperator(op); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(CodeBinaryOperatorType.Add)] [InlineData(CodeBinaryOperatorType.Assign)] [InlineData(CodeBinaryOperatorType.BitwiseAnd)] [InlineData(CodeBinaryOperatorType.BitwiseOr)] [InlineData(CodeBinaryOperatorType.BooleanAnd)] [InlineData(CodeBinaryOperatorType.BooleanOr)] [InlineData(CodeBinaryOperatorType.Divide)] [InlineData(CodeBinaryOperatorType.GreaterThan)] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual)] [InlineData(CodeBinaryOperatorType.IdentityEquality)] [InlineData(CodeBinaryOperatorType.IdentityInequality)] [InlineData(CodeBinaryOperatorType.LessThan)] [InlineData(CodeBinaryOperatorType.LessThanOrEqual)] [InlineData(CodeBinaryOperatorType.Modulus)] [InlineData(CodeBinaryOperatorType.Multiply)] [InlineData(CodeBinaryOperatorType.Subtract)] [InlineData(CodeBinaryOperatorType.ValueEquality)] public void OutputOperator_InvokeWithoutOutput_ThrowsNullReferenceException(CodeBinaryOperatorType op) { CodeGeneratorTests generator = this; generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); Assert.Throws<NullReferenceException>(() => generator.OutputOperator(op)); } [Theory] [InlineData(CodeBinaryOperatorType.Add - 1)] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual + 1)] public void OutputOperator_InvokeWithoutOutputInvalidOp_Nop(CodeBinaryOperatorType op) { CodeGeneratorTests generator = this; generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.OutputOperator(op); } public static IEnumerable<object[]> OutputParameter_TestData() { yield return new object[] { new CodeParameterDeclarationExpression[0], "" }; yield return new object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression() }, "Type " }; yield return new 
object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression("type1", "name1"), new CodeParameterDeclarationExpression("type2", "name2") }, "Type name1, Type name2" }; yield return new object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression() }, "Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type " }; yield return new object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression() }, $"{Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type " }; } [Theory] [MemberData(nameof(OutputParameter_TestData))] public void OutputParameter_Invoke_Success(CodeParameterDeclarationExpression[] parametersArray, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var parameters = new CodeParameterDeclarationExpressionCollection(parametersArray); generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); int generateParameterDeclarationExpressionCallCount = 0; int outputTypeActionCallCount = 0; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(parameters[generateParameterDeclarationExpressionCallCount], actualE); generateParameterDeclarationExpressionCallCount++; }; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(parameters[generateParameterDeclarationExpressionCallCount].Type, actualTypeRef); writer.Write("Type"); outputTypeActionCallCount++; }; generator.OutputIdentifierAction = (actualTypeRef, baseMethod) => baseMethod(actualTypeRef); generator.OutputParameters(parameters); Assert.Equal(expected, 
writer.ToString()); Assert.Equal(parameters.Count, generateParameterDeclarationExpressionCallCount); Assert.Equal(parameters.Count, outputTypeActionCallCount);
// State should be unchanged: OutputParameters restores the indent before returning.
Assert.Equal(expected, writer.ToString()); Assert.Equal(parameters.Count, generateParameterDeclarationExpressionCallCount); Assert.Equal(parameters.Count, outputTypeActionCallCount); }); } [Fact] public void OutputParameters_EmptyParametersWithoutWriter_Nop() { CodeGeneratorTests generator = this; var parameters = new CodeParameterDeclarationExpressionCollection(); generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); generator.OutputParameters(parameters); } [Fact] public void OutputParameters_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var parameters = new CodeParameterDeclarationExpressionCollection(new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression() }); generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); int generateParameterDeclarationExpressionCallCount = 0; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(parameters[generateParameterDeclarationExpressionCallCount], actualE); generateParameterDeclarationExpressionCallCount++; }; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); generator.OutputTypeAction = (actualTypeRef) => { }; Assert.Throws<NullReferenceException>(() => generator.OutputParameters(parameters)); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void OutputParameters_NullParameters_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); Assert.Throws<ArgumentNullException>("parameters", () => generator.OutputParameters(null)); } public static IEnumerable<object[]> OutputTypeNamePair_TestData() { yield return new object[] { null, null }; yield return new object[] { new CodeTypeReference(), "" }; yield return new object[] { new CodeTypeReference(), "name" }; } [Theory] [MemberData(nameof(OutputTypeNamePair_TestData))] public void OutputTypeNamePair_Invoke_Success(CodeTypeReference typeRef, string name) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); int outputTypeCallCount = 0; int outputIdentifierCallCount = 0; generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(typeRef, actualTypeRef); Assert.Equal(0, outputIdentifierCallCount); writer.Write("Type"); outputTypeCallCount++; }; generator.OutputIdentifierAction = (actualIdent, baseMethod) => { baseMethod(actualIdent); outputIdentifierCallCount++; }; generator.OutputTypeNamePair(typeRef, name); Assert.Equal($"Type {name}", writer.ToString()); Assert.Equal(1, outputTypeCallCount); Assert.Equal(1, outputIdentifierCallCount); }); } [Fact] public void OutputTypeNamePair_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var typeRef = new CodeTypeReference(); generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName);
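// Output is null outside a GenerateCodeFrom* call; the base OutputTypeNamePair presumably writes the separating space through Output (as the "Type {name}" expectation above implies), so the call below should throw.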
generator.OutputTypeAction = (actualTypeRef) => {}; Assert.Throws<NullReferenceException>(() => generator.OutputTypeNamePair(typeRef, "name")); }
// Per the expected strings below, OutputTypeAttributes maps the TypeAttributes visibility bits plus the isStruct/isEnum flags to a C# type header; abstract/sealed only apply when a class is emitted.
[Theory] [InlineData(TypeAttributes.NotPublic, false, false, "class ")] [InlineData(TypeAttributes.NotPublic, true, false, "struct ")] [InlineData(TypeAttributes.NotPublic, true, true, "struct ")] [InlineData(TypeAttributes.NotPublic, false, true, "enum ")] [InlineData(TypeAttributes.Public, false, false, "public class ")] [InlineData(TypeAttributes.Public, true, false, "public struct ")] [InlineData(TypeAttributes.Public, true, true, "public struct ")] [InlineData(TypeAttributes.Public, false, true, "public enum ")] [InlineData(TypeAttributes.NestedPublic, false, false, "public class ")] [InlineData(TypeAttributes.NestedPublic, true, false, "public struct ")] [InlineData(TypeAttributes.NestedPublic, true, true, "public struct ")] [InlineData(TypeAttributes.NestedPublic, false, true, "public enum ")] [InlineData(TypeAttributes.NestedPrivate, false, false, "private class ")] [InlineData(TypeAttributes.NestedPrivate, true, false, "private struct ")] [InlineData(TypeAttributes.NestedPrivate, true, true, "private struct ")] [InlineData(TypeAttributes.NestedPrivate, false, true, "private enum ")] [InlineData(TypeAttributes.NestedFamily, false, false, "class ")] [InlineData(TypeAttributes.NestedAssembly, false, false, "class ")] [InlineData(TypeAttributes.NestedFamANDAssem, false, false, "class ")] [InlineData(TypeAttributes.NestedFamORAssem, false, false, "class ")] [InlineData(TypeAttributes.SequentialLayout, false, false, "class ")] [InlineData(TypeAttributes.ExplicitLayout, false, false, "class ")] [InlineData(TypeAttributes.LayoutMask, false, false, "class ")] [InlineData(TypeAttributes.Interface, false, false, "interface ")] [InlineData(TypeAttributes.Abstract, false, false, "abstract class ")] [InlineData(TypeAttributes.Abstract, true, false, "struct ")] [InlineData(TypeAttributes.Abstract, true, true, "struct ")] [InlineData(TypeAttributes.Abstract, false, true, "enum ")] [InlineData(TypeAttributes.Sealed, false, false, "sealed class ")] [InlineData(TypeAttributes.Sealed, true, false, "struct ")] [InlineData(TypeAttributes.Sealed, true, true, "struct ")] [InlineData(TypeAttributes.Sealed, false, true, "enum ")] [InlineData(TypeAttributes.SpecialName, false, false, "class ")] [InlineData(TypeAttributes.RTSpecialName, false, false, "class ")] [InlineData(TypeAttributes.Import, false, false, "class ")] [InlineData(TypeAttributes.Serializable, false, false, "class ")] [InlineData(TypeAttributes.WindowsRuntime, false, false, "class ")] [InlineData(TypeAttributes.UnicodeClass, false, false, "class ")] [InlineData(TypeAttributes.AutoClass, false, false, "class ")] [InlineData(TypeAttributes.CustomFormatClass, false, false, "class ")] [InlineData(TypeAttributes.HasSecurity, false, false, "class ")] [InlineData(TypeAttributes.ReservedMask, false, false, "class ")] [InlineData(TypeAttributes.BeforeFieldInit, false, false, "class ")] [InlineData(TypeAttributes.CustomFormatMask, false, false, "class ")] public void OutputTypeAttributes_Invoke_Success(TypeAttributes attributes, bool isStruct, bool isEnum, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputTypeAttributesAction = (actualAttributes, actualIsStruct, actualIsEnum, baseMethod) => baseMethod(actualAttributes, actualIsStruct, actualIsEnum); generator.OutputTypeAttributes(attributes, isStruct, isEnum); Assert.Equal(expected, writer.ToString()); }); } [Theory]
[InlineData(TypeAttributes.NotPublic, false, false)] [InlineData(TypeAttributes.NotPublic, true, false)] [InlineData(TypeAttributes.NotPublic, true, true)] [InlineData(TypeAttributes.NotPublic, false, true)] [InlineData(TypeAttributes.Public, false, false)] [InlineData(TypeAttributes.Public, true, false)] [InlineData(TypeAttributes.Public, true, true)] [InlineData(TypeAttributes.Public, false, true)] [InlineData(TypeAttributes.NestedPublic, false, false)] [InlineData(TypeAttributes.NestedPublic, true, false)] [InlineData(TypeAttributes.NestedPublic, true, true)] [InlineData(TypeAttributes.NestedPublic, false, true)] [InlineData(TypeAttributes.NestedPrivate, false, false)] [InlineData(TypeAttributes.NestedPrivate, true, false)] [InlineData(TypeAttributes.NestedPrivate, true, true)] [InlineData(TypeAttributes.NestedPrivate, false, true)] [InlineData(TypeAttributes.NestedFamily, false, false)] [InlineData(TypeAttributes.NestedAssembly, false, false)] [InlineData(TypeAttributes.NestedFamANDAssem, false, false)] [InlineData(TypeAttributes.NestedFamORAssem, false, false)] [InlineData(TypeAttributes.SequentialLayout, false, false)] [InlineData(TypeAttributes.ExplicitLayout, false, false)] [InlineData(TypeAttributes.LayoutMask, false, false)] [InlineData(TypeAttributes.Interface, false, false)] [InlineData(TypeAttributes.Abstract, false, false)] [InlineData(TypeAttributes.Abstract, true, false)] [InlineData(TypeAttributes.Abstract, true, true)] [InlineData(TypeAttributes.Abstract, false, true)] [InlineData(TypeAttributes.Sealed, false, false)] [InlineData(TypeAttributes.Sealed, true, false)] [InlineData(TypeAttributes.Sealed, true, true)] [InlineData(TypeAttributes.Sealed, false, true)] [InlineData(TypeAttributes.SpecialName, false, false)] [InlineData(TypeAttributes.RTSpecialName, false, false)] [InlineData(TypeAttributes.Import, false, false)] [InlineData(TypeAttributes.Serializable, false, false)] [InlineData(TypeAttributes.WindowsRuntime, false, false)] [InlineData(TypeAttributes.UnicodeClass, false, false)] [InlineData(TypeAttributes.AutoClass, false, false)] [InlineData(TypeAttributes.CustomFormatClass, false, false)] [InlineData(TypeAttributes.HasSecurity, false, false)] [InlineData(TypeAttributes.ReservedMask, false, false)] [InlineData(TypeAttributes.BeforeFieldInit, false, false)] [InlineData(TypeAttributes.CustomFormatMask, false, false)] public void OutputTypeAttributes_InvokeWithoutWriter_ThrowsNullReferenceException(TypeAttributes attributes, bool isStruct, bool isEnum) { CodeGeneratorTests generator = this; generator.OutputTypeAttributesAction = (actualAttributes, actualIsStruct, actualIsEnum, baseMethod) => baseMethod(actualAttributes, actualIsStruct, actualIsEnum); Assert.Throws<NullReferenceException>(() => generator.OutputTypeAttributes(attributes, isStruct, isEnum)); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] public void ValidateIdentifier_InvokeValid_Nop(string value) { CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return true; }; generator.ValidateIdentifier(value); Assert.Equal(1, isValidIdentifierCallCount); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void ValidateIdentifier_InvokeInvalid_ThrowsArgumentException(string value)
{ CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return false; }; Assert.Throws<ArgumentException>("value", () => generator.ValidateIdentifier(value)); Assert.Equal(1, isValidIdentifierCallCount); } [Theory] [InlineData(null, null)] [InlineData(null, "")] [InlineData(null, "identifier")] [InlineData("", null)] [InlineData("", "")] [InlineData("", "identifier")] [InlineData("identifier", null)] [InlineData("identifier", "")] [InlineData("identifier", "escapedIdentifier")] public void ICodeGeneratorCreateEscapedIdentifier_Invoke_ReturnsExpected(string value, string result) { CodeGeneratorTests generator = this; int callCount = 0; generator.CreateEscapedIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.CreateEscapedIdentifier(value)); Assert.Equal(1, callCount); } [Theory] [InlineData(null, null)] [InlineData(null, "")] [InlineData(null, "identifier")] [InlineData("", null)] [InlineData("", "")] [InlineData("", "identifier")] [InlineData("identifier", null)] [InlineData("identifier", "")] [InlineData("identifier", "validIdentifier")] public void ICodeGeneratorCreateValidIdentifier_Invoke_ReturnsExpected(string value, string result) { CodeGeneratorTests generator = this; int callCount = 0; generator.CreateValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.CreateValidIdentifier(value)); Assert.Equal(1, callCount); } public static IEnumerable<object[]> GetTypeOutput_TestData() { yield return new object[] { null, null }; yield return new object[] { null, string.Empty }; yield return new object[] { null, "Output" }; yield return new object[] { new CodeTypeReference(), null }; yield return new object[] { new CodeTypeReference(), string.Empty }; yield return new object[] { new CodeTypeReference(), "Output" }; } [Theory] [MemberData(nameof(GetTypeOutput_TestData))] public void ICodeGeneratorGetTypeOutput_Invoke_ReturnsExpected(CodeTypeReference value, string result) { CodeGeneratorTests generator = this; int callCount = 0; generator.GetTypeOutputAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.GetTypeOutput(value)); Assert.Equal(1, callCount); } public static IEnumerable<object[]> IsValidIdentifier_TestData() { foreach (bool result in new bool[] { true, false }) { yield return new object[] { null, result }; yield return new object[] { "", result }; yield return new object[] { "value", result }; } } [Theory] [MemberData(nameof(IsValidIdentifier_TestData))] public void ICodeGeneratorIsValidIdentifier_Invoke_ReturnsExpected(string value, bool result) { CodeGeneratorTests generator = this; int callCount = 0; generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.IsValidIdentifier(value)); Assert.Equal(1, callCount); } public static IEnumerable<object[]> Supports_TestData() { foreach (bool result in new bool[] { true, false }) { yield return new object[] 
{ GeneratorSupport.ArraysOfArrays - 1, result }; yield return new object[] { GeneratorSupport.AssemblyAttributes, result }; } } [Theory] [MemberData(nameof(Supports_TestData))] public void ICodeGeneratorSupports_Invoke_ReturnsExpected(GeneratorSupport support, bool result) { CodeGeneratorTests generator = this; int callCount = 0; generator.SupportsAction = (actualSupport) => { Assert.Equal(support, actualSupport); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.Supports(support)); Assert.Equal(1, callCount); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] public void ICodeGeneratorValidateIdentifier_InvokeValid_Nop(string value) { CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return true; }; ICodeGenerator iCodeGenerator = generator; iCodeGenerator.ValidateIdentifier(value); Assert.Equal(1, isValidIdentifierCallCount); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void ICodeGeneratorValidateIdentifier_InvokeInvalid_ThrowsArgumentException(string value) { CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return false; }; ICodeGenerator iCodeGenerator = generator; Assert.Throws<ArgumentException>("value", () => iCodeGenerator.ValidateIdentifier(value)); Assert.Equal(1, isValidIdentifierCallCount); } private void PerformActionWithOutput(Action<StringWriter> action, CodeGeneratorOptions options = null) { CodeGeneratorTests generator = this; ICodeGenerator iCodeGenerator = generator; var e = new CodeArrayCreateExpression(typeof(int)); var writer = new StringWriter(); int callCount = 0; generator.GenerateArrayCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generator.Indent); Assert.NotNull(generator.Output); if (options != null) { Assert.Same(options, generator.Options); } else { Assert.NotNull(generator.Options); } action(writer); callCount++; }; iCodeGenerator.GenerateCodeFromExpression(e, writer, options); Assert.Equal(1, callCount); } protected override string NullToken => "NullToken"; public Func<string, string> CreateEscapedIdentifierAction { get; set; } protected override string CreateEscapedIdentifier(string value) { return CreateEscapedIdentifierAction(value); } public Func<string, string> CreateValidIdentifierAction { get; set; } protected override string CreateValidIdentifier(string value) { return CreateValidIdentifierAction(value); } public Action<CodeArgumentReferenceExpression> GenerateArgumentReferenceExpressionAction { get; set; } protected override void GenerateArgumentReferenceExpression(CodeArgumentReferenceExpression e) { GenerateArgumentReferenceExpressionAction(e); } public Action<CodeArrayCreateExpression> GenerateArrayCreateExpressionAction { get; set; } protected override void GenerateArrayCreateExpression(CodeArrayCreateExpression e) { GenerateArrayCreateExpressionAction(e); } public Action<CodeArrayIndexerExpression> GenerateArrayIndexerExpressionAction { get; set; } protected 
override void GenerateArrayIndexerExpression(CodeArrayIndexerExpression e) { GenerateArrayIndexerExpressionAction(e); } public Action<CodeAssignStatement> GenerateAssignStatementAction { get; set; } protected override void GenerateAssignStatement(CodeAssignStatement e) { GenerateAssignStatementAction(e); } public Action<CodeAttachEventStatement> GenerateAttachEventStatementAction { get; set; } protected override void GenerateAttachEventStatement(CodeAttachEventStatement e) { GenerateAttachEventStatementAction(e); } public Action<CodeAttributeDeclarationCollection> GenerateAttributeDeclarationsEndAction { get; set; } protected override void GenerateAttributeDeclarationsEnd(CodeAttributeDeclarationCollection attributes) { GenerateAttributeDeclarationsEndAction(attributes); } public Action<CodeAttributeDeclarationCollection> GenerateAttributeDeclarationsStartAction { get; set; } protected override void GenerateAttributeDeclarationsStart(CodeAttributeDeclarationCollection attributes) { GenerateAttributeDeclarationsStartAction(attributes); } public Action<CodeBaseReferenceExpression> GenerateBaseReferenceExpressionAction { get; set; } protected override void GenerateBaseReferenceExpression(CodeBaseReferenceExpression e) { GenerateBaseReferenceExpressionAction(e); } public Action<CodeCastExpression> GenerateCastExpressionAction { get; set; } protected override void GenerateCastExpression(CodeCastExpression e) { GenerateCastExpressionAction(e); } public Action<CodeComment> GenerateCommentAction { get; set; } protected override void GenerateComment(CodeComment e) { GenerateCommentAction(e); } public Action<CodeCommentStatementCollection, Action<CodeCommentStatementCollection>> GenerateCommentStatementsAction { get; set; } protected override void GenerateCommentStatements(CodeCommentStatementCollection e) { if (e != null) { if (e.GetEnumerator().MoveNext()) { GenerateCommentStatementsAction(e, base.GenerateCommentStatements); } } else { GenerateCommentStatementsAction(e, base.GenerateCommentStatements); } } public Action<CodeCompileUnit, Action<CodeCompileUnit>> GenerateCompileUnitAction { get; set; } protected override void GenerateCompileUnit(CodeCompileUnit e) { GenerateCompileUnitAction(e, base.GenerateCompileUnit); } public Action<CodeCompileUnit, Action<CodeCompileUnit>> GenerateCompileUnitEndAction { get; set; } protected override void GenerateCompileUnitEnd(CodeCompileUnit e) { GenerateCompileUnitEndAction(e, base.GenerateCompileUnitEnd); } public Action<CodeCompileUnit, Action<CodeCompileUnit>> GenerateCompileUnitStartAction { get; set; } protected override void GenerateCompileUnitStart(CodeCompileUnit e) { GenerateCompileUnitStartAction(e, base.GenerateCompileUnitStart); } public Action<CodeConditionStatement> GenerateConditionStatementAction { get; set; } protected override void GenerateConditionStatement(CodeConditionStatement e) { GenerateConditionStatementAction(e); } public Action<CodeConstructor, CodeTypeDeclaration> GenerateConstructorAction { get; set; } protected override void GenerateConstructor(CodeConstructor e, CodeTypeDeclaration c) { GenerateConstructorAction(e, c); } public Action<decimal, Action<decimal>> GenerateDecimalValueAction { get; set; } protected override void GenerateDecimalValue(decimal d) { GenerateDecimalValueAction(d, base.GenerateDecimalValue); } public Action<CodeDefaultValueExpression, Action<CodeDefaultValueExpression>> GenerateDefaultValueExpressionAction { get; set; } protected override void GenerateDefaultValueExpression(CodeDefaultValueExpression e) 
{ GenerateDefaultValueExpressionAction(e, base.GenerateDefaultValueExpression); } public Action<CodeDelegateCreateExpression> GenerateDelegateCreateExpressionAction { get; set; } protected override void GenerateDelegateCreateExpression(CodeDelegateCreateExpression e) { GenerateDelegateCreateExpressionAction(e); } public Action<CodeDelegateInvokeExpression> GenerateDelegateInvokeExpressionAction { get; set; } protected override void GenerateDelegateInvokeExpression(CodeDelegateInvokeExpression e) { GenerateDelegateInvokeExpressionAction(e); } public Action<CodeDirectionExpression, Action<CodeDirectionExpression>> GenerateDirectionExpressionAction { get; set; } protected override void GenerateDirectionExpression(CodeDirectionExpression e) { GenerateDirectionExpressionAction(e, base.GenerateDirectionExpression); } public Action<CodeDirectiveCollection, Action<CodeDirectiveCollection>> GenerateDirectivesAction { get; set; } protected override void GenerateDirectives(CodeDirectiveCollection directives) { if (directives != null && directives.GetEnumerator().MoveNext()) { GenerateDirectivesAction(directives, base.GenerateDirectives); } } public Action<double, Action<double>> GenerateDoubleValueAction { get; set; } protected override void GenerateDoubleValue(double d) { GenerateDoubleValueAction(d, base.GenerateDoubleValue); } public Action<CodeEntryPointMethod, CodeTypeDeclaration> GenerateEntryPointMethodAction { get; set; } protected override void GenerateEntryPointMethod(CodeEntryPointMethod e, CodeTypeDeclaration c) { GenerateEntryPointMethodAction(e, c); } public Action<CodeMemberEvent, CodeTypeDeclaration> GenerateEventAction { get; set; } protected override void GenerateEvent(CodeMemberEvent e, CodeTypeDeclaration c) { GenerateEventAction(e, c); } public Action<CodeEventReferenceExpression> GenerateEventReferenceExpressionAction { get; set; } protected override void GenerateEventReferenceExpression(CodeEventReferenceExpression e) { GenerateEventReferenceExpressionAction(e); } public Action<CodeExpressionStatement> GenerateExpressionStatementAction { get; set; } protected override void GenerateExpressionStatement(CodeExpressionStatement e) { GenerateExpressionStatementAction(e); } public Action<CodeMemberField> GenerateFieldAction { get; set; } protected override void GenerateField(CodeMemberField e) { GenerateFieldAction(e); } public Action<CodeFieldReferenceExpression> GenerateFieldReferenceExpressionAction { get; set; } protected override void GenerateFieldReferenceExpression(CodeFieldReferenceExpression e) { GenerateFieldReferenceExpressionAction(e); } public Action<CodeGotoStatement> GenerateGotoStatementAction { get; set; } protected override void GenerateGotoStatement(CodeGotoStatement e) { GenerateGotoStatementAction(e); } public Action<CodeIndexerExpression> GenerateIndexerExpressionAction { get; set; } protected override void GenerateIndexerExpression(CodeIndexerExpression e) { GenerateIndexerExpressionAction(e); } public Action<CodeIterationStatement> GenerateIterationStatementAction { get; set; } protected override void GenerateIterationStatement(CodeIterationStatement e) { GenerateIterationStatementAction(e); } public Action<CodeLabeledStatement> GenerateLabeledStatementAction { get; set; } protected override void GenerateLabeledStatement(CodeLabeledStatement e) { GenerateLabeledStatementAction(e); } public Action<CodeLinePragma> GenerateLinePragmaEndAction { get; set; } protected override void GenerateLinePragmaEnd(CodeLinePragma e) { GenerateLinePragmaEndAction(e); } public 
Action<CodeLinePragma> GenerateLinePragmaStartAction { get; set; } protected override void GenerateLinePragmaStart(CodeLinePragma e) { GenerateLinePragmaStartAction(e); } public Action<CodeMemberMethod, CodeTypeDeclaration> GenerateMethodAction { get; set; } protected override void GenerateMethod(CodeMemberMethod e, CodeTypeDeclaration c) { GenerateMethodAction(e, c); } public Action<CodeMethodInvokeExpression> GenerateMethodInvokeExpressionAction { get; set; } protected override void GenerateMethodInvokeExpression(CodeMethodInvokeExpression e) { GenerateMethodInvokeExpressionAction(e); } public Action<CodeMethodReferenceExpression> GenerateMethodReferenceExpressionAction { get; set; } protected override void GenerateMethodReferenceExpression(CodeMethodReferenceExpression e) { GenerateMethodReferenceExpressionAction(e); } public Action<CodeMethodReturnStatement> GenerateMethodReturnStatementAction { get; set; } protected override void GenerateMethodReturnStatement(CodeMethodReturnStatement e) { GenerateMethodReturnStatementAction(e); } public Action<CodeNamespace, Action<CodeNamespace>> GenerateNamespaceAction { get; set; } protected override void GenerateNamespace(CodeNamespace e) { GenerateNamespaceAction(e, base.GenerateNamespace); } public Action<CodeNamespace> GenerateNamespaceEndAction { get; set; } protected override void GenerateNamespaceEnd(CodeNamespace e) { GenerateNamespaceEndAction(e); } public Action<CodeNamespaceImport> GenerateNamespaceImportAction { get; set; } protected override void GenerateNamespaceImport(CodeNamespaceImport e) { GenerateNamespaceImportAction(e); } public Action<CodeNamespace> GenerateNamespaceStartAction { get; set; } protected override void GenerateNamespaceStart(CodeNamespace e) { GenerateNamespaceStartAction(e); } public Action<CodeObjectCreateExpression> GenerateObjectCreateExpressionAction { get; set; } protected override void GenerateObjectCreateExpression(CodeObjectCreateExpression e) { GenerateObjectCreateExpressionAction(e); } public Action<CodeParameterDeclarationExpression, Action<CodeParameterDeclarationExpression>> GenerateParameterDeclarationExpressionAction { get; set; } protected override void GenerateParameterDeclarationExpression(CodeParameterDeclarationExpression e) { GenerateParameterDeclarationExpressionAction(e, base.GenerateParameterDeclarationExpression); } public Action<CodePrimitiveExpression, Action<CodePrimitiveExpression>> GeneratePrimitiveExpressionAction { get; set; } protected override void GeneratePrimitiveExpression(CodePrimitiveExpression e) { GeneratePrimitiveExpressionAction(e, base.GeneratePrimitiveExpression); } public Action<CodeMemberProperty, CodeTypeDeclaration> GeneratePropertyAction { get; set; } protected override void GenerateProperty(CodeMemberProperty e, CodeTypeDeclaration c) { GeneratePropertyAction(e, c); } public Action<CodePropertyReferenceExpression> GeneratePropertyReferenceExpressionAction { get; set; } protected override void GeneratePropertyReferenceExpression(CodePropertyReferenceExpression e) { GeneratePropertyReferenceExpressionAction(e); } public Action<CodePropertySetValueReferenceExpression> GeneratePropertySetValueReferenceExpressionAction { get; set; } protected override void GeneratePropertySetValueReferenceExpression(CodePropertySetValueReferenceExpression e) { GeneratePropertySetValueReferenceExpressionAction(e); } public Action<CodeRemoveEventStatement> GenerateRemoveEventStatementAction { get; set; } protected override void GenerateRemoveEventStatement(CodeRemoveEventStatement e) { 
GenerateRemoveEventStatementAction(e); } public Action<float, Action<float>> GenerateSingleFloatValueAction { get; set; } protected override void GenerateSingleFloatValue(float s) { GenerateSingleFloatValueAction(s, base.GenerateSingleFloatValue); } public Action<CodeSnippetExpression> GenerateSnippetExpressionAction { get; set; } protected override void GenerateSnippetExpression(CodeSnippetExpression e) { GenerateSnippetExpressionAction(e); } public Action<CodeSnippetTypeMember> GenerateSnippetMemberAction { get; set; } protected override void GenerateSnippetMember(CodeSnippetTypeMember e) { GenerateSnippetMemberAction(e); } public Action<CodeSnippetStatement, Action<CodeSnippetStatement>> GenerateSnippetStatementAction { get; set; } protected override void GenerateSnippetStatement(CodeSnippetStatement e) { GenerateSnippetStatementAction(e, base.GenerateSnippetStatement); } public Action<CodeThisReferenceExpression> GenerateThisReferenceExpressionAction { get; set; } protected override void GenerateThisReferenceExpression(CodeThisReferenceExpression e) { GenerateThisReferenceExpressionAction(e); } public Action<CodeThrowExceptionStatement> GenerateThrowExceptionStatementAction { get; set; } protected override void GenerateThrowExceptionStatement(CodeThrowExceptionStatement e) { GenerateThrowExceptionStatementAction(e); } public Action<CodeTryCatchFinallyStatement> GenerateTryCatchFinallyStatementAction { get; set; } protected override void GenerateTryCatchFinallyStatement(CodeTryCatchFinallyStatement e) { GenerateTryCatchFinallyStatementAction(e); } public Action<CodeTypeConstructor> GenerateTypeConstructorAction { get; set; } protected override void GenerateTypeConstructor(CodeTypeConstructor e) { GenerateTypeConstructorAction(e); } public Action<CodeTypeDeclaration> GenerateTypeEndAction { get; set; } protected override void GenerateTypeEnd(CodeTypeDeclaration e) { GenerateTypeEndAction(e); } public Action<CodeTypeOfExpression, Action<CodeTypeOfExpression>> GenerateTypeOfExpressionAction { get; set; } protected override void GenerateTypeOfExpression(CodeTypeOfExpression e) { GenerateTypeOfExpressionAction(e, base.GenerateTypeOfExpression); } public Action<CodeTypeReferenceExpression, Action<CodeTypeReferenceExpression>> GenerateTypeReferenceExpressionAction { get; set; } protected override void GenerateTypeReferenceExpression(CodeTypeReferenceExpression e) { GenerateTypeReferenceExpressionAction(e, base.GenerateTypeReferenceExpression); } public Action<CodeTypeDeclaration> GenerateTypeStartAction { get; set; } protected override void GenerateTypeStart(CodeTypeDeclaration e) { GenerateTypeStartAction(e); } public Action<CodeVariableDeclarationStatement> GenerateVariableDeclarationStatementAction { get; set; } protected override void GenerateVariableDeclarationStatement(CodeVariableDeclarationStatement e) { GenerateVariableDeclarationStatementAction(e); } public Action<CodeVariableReferenceExpression> GenerateVariableReferenceExpressionAction { get; set; } protected override void GenerateVariableReferenceExpression(CodeVariableReferenceExpression e) { GenerateVariableReferenceExpressionAction(e); } public Func<CodeTypeReference, string> GetTypeOutputAction { get; set; } protected override string GetTypeOutput(CodeTypeReference value) { return GetTypeOutputAction(value); } public Func<string, bool> IsValidIdentifierAction { get; set; } protected override bool IsValidIdentifier(string value) { return IsValidIdentifierAction(value); } public Action<CodeAttributeArgument, 
Action<CodeAttributeArgument>> OutputAttributeArgumentAction { get; set; } protected override void OutputAttributeArgument(CodeAttributeArgument arg) { OutputAttributeArgumentAction(arg, base.OutputAttributeArgument); } public Action<CodeAttributeDeclarationCollection, Action<CodeAttributeDeclarationCollection>> OutputAttributeDeclarationsAction { get; set; } protected override void OutputAttributeDeclarations(CodeAttributeDeclarationCollection attributes) { OutputAttributeDeclarationsAction(attributes, base.OutputAttributeDeclarations); } public Action<FieldDirection, Action<FieldDirection>> OutputDirectionAction { get; set; } protected override void OutputDirection(FieldDirection dir) { OutputDirectionAction(dir, base.OutputDirection); } public Action<MemberAttributes, Action<MemberAttributes>> OutputFieldScopeModifierAction { get; set; } protected override void OutputFieldScopeModifier(MemberAttributes attributes) { OutputFieldScopeModifierAction(attributes, base.OutputFieldScopeModifier); } public Action<string, Action<string>> OutputIdentifierAction { get; set; } protected override void OutputIdentifier(string ident) { OutputIdentifierAction(ident, base.OutputIdentifier); } public Action<MemberAttributes, Action<MemberAttributes>> OutputMemberAccessModifierAction { get; set; } protected override void OutputMemberAccessModifier(MemberAttributes attributes) { OutputMemberAccessModifierAction(attributes, base.OutputMemberAccessModifier); } public Action<MemberAttributes, Action<MemberAttributes>> OutputMemberScopeModifierAction { get; set; } protected override void OutputMemberScopeModifier(MemberAttributes attributes) { OutputMemberScopeModifierAction(attributes, base.OutputMemberScopeModifier); } public Action<CodeBinaryOperatorType, Action<CodeBinaryOperatorType>> OutputOperatorAction { get; set; } protected override void OutputOperator(CodeBinaryOperatorType op) { OutputOperatorAction(op, base.OutputOperator); } public Action<CodeParameterDeclarationExpressionCollection, Action<CodeParameterDeclarationExpressionCollection>> OutputParametersAction { get; set; } protected override void OutputParameters(CodeParameterDeclarationExpressionCollection parameters) { OutputParametersAction(parameters, base.OutputParameters); } public Action<CodeTypeReference> OutputTypeAction { get; set; } protected override void OutputType(CodeTypeReference typeRef) { OutputTypeAction(typeRef); } public Action<TypeAttributes, bool, bool, Action<TypeAttributes, bool, bool>> OutputTypeAttributesAction { get; set; } protected override void OutputTypeAttributes(TypeAttributes attributes, bool isStruct, bool isEnum) { OutputTypeAttributesAction(attributes, isStruct, isEnum, base.OutputTypeAttributes); } public Action<CodeTypeReference, string, Action<CodeTypeReference, string>> OutputTypeNamePairAction { get; set; } protected override void OutputTypeNamePair(CodeTypeReference typeRef, string name) { OutputTypeNamePairAction(typeRef, name, base.OutputTypeNamePair); } public Func<string, string> QuoteSnippetStringAction { get; set; } protected override string QuoteSnippetString(string value) { return QuoteSnippetStringAction(value); } public Func<GeneratorSupport, bool> SupportsAction { get; set; } protected override bool Supports(GeneratorSupport support) { return SupportsAction(support); } public Action<string, Action<string>> ValidateIdentifierAction { get; set; } protected override void ValidateIdentifier(string value) { ValidateIdentifierAction(value, base.ValidateIdentifier); } private class 
CustomCodeExpression : CodeExpression { }

        private class CustomCodeStatement : CodeStatement { }
    }
}
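// A quick sketch of how the delegate-based mock above is used: each protected CodeGenerator
// override forwards to an Action/Func assigned by the current test, and the two-parameter hooks
// also receive the base implementation so a test can call through to it, e.g.:
//
//     generator.OutputDirectionAction = (dir, baseMethod) => baseMethod(dir);   // delegate to base
//     generator.GenerateFieldAction = (e) => { /* observe e without emitting anything */ };
//
// A hook that a test never assigns stays null, so an unexpected callback fails fast with a
// NullReferenceException.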
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.ComponentModel;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Reflection;
using System.Text;
using Xunit;

namespace System.CodeDom.Compiler.Tests
{
    public class CodeGeneratorTests : CodeGenerator
    {
        [Fact]
        public void Ctor_Default()
        {
            CodeGeneratorTests generator = this;
            Assert.Null(generator.CurrentClass);
            Assert.Null(generator.CurrentMember);
            Assert.Equal("<% unknown %>", generator.CurrentMemberName);
            Assert.Equal("<% unknown %>", generator.CurrentTypeName);
            Assert.Throws<NullReferenceException>(() => generator.Indent);
            Assert.False(generator.IsCurrentClass);
            Assert.False(generator.IsCurrentDelegate);
            Assert.False(generator.IsCurrentEnum);
            Assert.False(generator.IsCurrentInterface);
            Assert.False(generator.IsCurrentStruct);
            Assert.Null(generator.Options);
            Assert.Null(generator.Output);
        }

        [Theory]
        [InlineData(null, "")]
        [InlineData("", "")]
        [InlineData("st", "st")]
        public void ContinueOnNewLine_InvokeWithOutput_Appends(string st, string expected)
        {
            CodeGeneratorTests generator = this;
            generator.PerformActionWithOutput(writer =>
            {
                generator.ContinueOnNewLine(st);
                Assert.Equal(expected + Environment.NewLine, writer.ToString());
            });
        }

        [Theory]
        [InlineData(null)]
        [InlineData("")]
        [InlineData("st")]
        public void ContinueOnNewLine_InvokeWithoutOutput_ThrowsNullReferenceException(string st)
        {
            CodeGeneratorTests generator = this;
            Assert.Throws<NullReferenceException>(() => generator.ContinueOnNewLine(st));
        }

        [Theory]
        [InlineData(-1, 0)]
        [InlineData(0, 0)]
        [InlineData(3, 3)]
        public void Indent_SetWithOutput_GetReturnsExpected(int value, int expected)
        {
            CodeGeneratorTests generator = this;
            PerformActionWithOutput(writer =>
            {
                generator.Indent = value;
                Assert.Equal(expected, generator.Indent);
            });
        }

        [Fact]
        public void Indent_SetWithoutOutput_ThrowsNullReferenceException()
        {
            CodeGeneratorTests generator = this;
            Assert.Throws<NullReferenceException>(() => generator.Indent = 1);
        }

        public static IEnumerable<object[]> GenerateBinaryOperatorExpression_TestData()
        {
            yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(2)), "(1 + 2)" };
            yield return new object[] { new CodeBinaryOperatorExpression(new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Multiply, new CodePrimitiveExpression(2)), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(3)), $"((1 * 2) {Environment.NewLine} + 3)" };
            yield return new object[] { new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Multiply, new CodeBinaryOperatorExpression(new CodePrimitiveExpression(2), CodeBinaryOperatorType.Add, new CodePrimitiveExpression(3))), $"(1 {Environment.NewLine} * (2 + 3))" };
        }

        [Theory]
        [MemberData(nameof(GenerateBinaryOperatorExpression_TestData))]
        public void GenerateBinaryOperatorExpression_Invoke_Success(CodeBinaryOperatorExpression e, string expected)
        {
            CodeGeneratorTests generator = this;
            PerformActionWithOutput(writer =>
            {
                generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier);
                generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp);
                generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE);
                generator.GenerateBinaryOperatorExpression(e);
                Assert.Equal(expected, writer.ToString());

                // Call again to make sure indent is reset.
                generator.GenerateBinaryOperatorExpression(e);
                Assert.Equal(expected + expected, writer.ToString());
            });
        }

        [Fact]
        [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
        public void GenerateBinaryOperatorExpression_NullE_ThrowsArgumentNullException()
        {
            CodeGeneratorTests generator = this;
            Assert.Throws<ArgumentNullException>("e", () => generator.GenerateBinaryOperatorExpression(null));
        }

        [Fact]
        [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
        public void GenerateBinaryOperatorExpression_NullLeftE_ThrowsArgumentNullException()
        {
            CodeGeneratorTests generator = this;
            PerformActionWithOutput(writer =>
            {
                var e = new CodeBinaryOperatorExpression(null, CodeBinaryOperatorType.Add, new CodePrimitiveExpression(1));
                generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp);
                generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE);
                Assert.Throws<ArgumentNullException>("e", () => generator.GenerateBinaryOperatorExpression(e));
            });
        }

        [Fact]
        [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
        public void GenerateBinaryOperatorExpression_NullRightE_ThrowsArgumentNullException()
        {
            CodeGeneratorTests generator = this;
            PerformActionWithOutput(writer =>
            {
                var e = new CodeBinaryOperatorExpression(new CodePrimitiveExpression(1), CodeBinaryOperatorType.Add, null);
                generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp);
                generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE);
                Assert.Throws<ArgumentNullException>("e", () => generator.GenerateBinaryOperatorExpression(e));
            });
        }

        [Fact]
        public void GenerateBinaryOperatorExpression_InvokeWithoutOutput_ThrowsNullReferenceException()
        {
            CodeGeneratorTests generator = this;
            var e = new CodeBinaryOperatorExpression();
            generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp);
            generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE);
            Assert.Throws<NullReferenceException>(() => generator.GenerateBinaryOperatorExpression(e));
        }

        public static IEnumerable<object[]> GenerateCodeFromMember_TestData()
        {
            yield return new object[] { new CodeTypeMember(), null, Environment.NewLine };
            yield return new object[] { new CodeTypeMember(), new CodeGeneratorOptions(), Environment.NewLine };
            yield return new object[] { new CodeTypeMember(), new CodeGeneratorOptions { BlankLinesBetweenMembers = false }, string.Empty };
        }

        [Theory]
        [MemberData(nameof(GenerateCodeFromMember_TestData))]
        public void GenerateCodeFromMember_Invoke_Success(CodeTypeMember member, CodeGeneratorOptions options, string expected)
        {
            CodeGeneratorTests generator = this;
            generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE);
            var writer = new StringWriter();
            generator.GenerateCodeFromMember(member, writer, options);
            Assert.Equal(expected, writer.ToString());
            Assert.Null(generator.Output);
            Assert.Null(generator.Options);
            Assert.Null(generator.CurrentClass);
            Assert.Null(generator.CurrentMember);
            Assert.Equal("<% unknown %>", generator.CurrentMemberName);
            Assert.Equal("<% unknown %>", generator.CurrentTypeName);
        }

        [Fact]
        public void GenerateCodeFromMember_InvokeWithCommentsDirectivesAndLinePragma_Success()
        {
            CodeGeneratorTests generator = this;
            var member = new CodeTypeMember { LinePragma = new CodeLinePragma() };
            member.Comments.Add(new CodeCommentStatement("Comment"));
            member.Comments.Add(new
CodeCommentStatement("Comment")); member.StartDirectives.Add(new CodeDirective()); member.StartDirectives.Add(new CodeChecksumPragma()); member.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); member.EndDirectives.Add(new CodeDirective()); member.EndDirectives.Add(new CodeChecksumPragma()); member.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); var writer = new StringWriter(); int generateCommentStatementsCallCount = 0; int generateCommentCallCount = 0; int generateLinePragmaStartCallCount = 0; int generateDirectivesCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(member.Comments, actualE); writer.Write("Comments "); Assert.Equal(0, generateLinePragmaStartCallCount); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); generateCommentStatementsCallCount++; }; generator.GenerateCommentAction = (actualE) => { Assert.Same(member.Comments[generateCommentCallCount].Comment, actualE); writer.Write("Comment "); generateCommentCallCount++; }; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(member.LinePragma, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(member.LinePragma, actualE); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? member.StartDirectives : member.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? 
"StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal($"{Environment.NewLine}StartDirectives Comment Comment Comments LinePragmaStart LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateCommentStatementsCallCount); Assert.Equal(2, generateCommentCallCount); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); } [Fact] public void GenerateCodeFromMember_CodeConstructor_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeConstructor(); int callCount = 0; generator.GenerateConstructorAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeEntryPointMethod_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeEntryPointMethod(); int callCount = 0; generator.GenerateEntryPointMethodAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberEvent_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberEvent(); int callCount = 0; generator.GenerateEventAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberField_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberField(); int callCount = 0; generator.GenerateFieldAction = (actualE) => { Assert.Same(member, actualE); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberMethod_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberMethod(); int callCount = 0; generator.GenerateMethodAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeMemberProperty_CallsCorrectMethod() { CodeGeneratorTests generator = this; var writer = new StringWriter(); var member = new CodeMemberProperty(); int callCount = 0; generator.GeneratePropertyAction = (actualE, type) => { Assert.Same(member, actualE); Assert.IsType<CodeTypeDeclaration>(type); callCount++; }; generator.GenerateCodeFromMember(member, writer, null); Assert.Equal(Environment.NewLine, writer.ToString()); Assert.Equal(1, callCount); } [Fact] public void GenerateCodeFromMember_CodeSnippetTypeMember_CallsCorrectMethod() { CodeGeneratorTests generator = this; 
            var writer = new StringWriter();
            var member = new CodeSnippetTypeMember();
            int callCount = 0;
            generator.GenerateSnippetMemberAction = (actualE) =>
            {
                Assert.Same(member, actualE);
                callCount++;
            };
            generator.GenerateCodeFromMember(member, writer, null);
            Assert.Equal(Environment.NewLine + Environment.NewLine, writer.ToString());
            Assert.Equal(1, callCount);
        }

        [Fact]
        public void GenerateCodeFromMember_CodeTypeConstructor_CallsCorrectMethod()
        {
            CodeGeneratorTests generator = this;
            var writer = new StringWriter();
            var member = new CodeTypeConstructor();
            int callCount = 0;
            generator.GenerateTypeConstructorAction = (actualE) =>
            {
                Assert.Same(member, actualE);
                callCount++;
            };
            generator.GenerateCodeFromMember(member, writer, null);
            Assert.Equal(Environment.NewLine, writer.ToString());
            Assert.Equal(1, callCount);
        }

        [Fact]
        public void GenerateCodeFromMember_InvokeWithOutput_ThrowsInvalidOperationException()
        {
            CodeGeneratorTests generator = this;
            PerformActionWithOutput(writer =>
            {
                Assert.Throws<InvalidOperationException>(() => generator.GenerateCodeFromMember(new CodeTypeMember(), new StringWriter(), new CodeGeneratorOptions()));
            });
        }

        [Fact]
        [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
        public void GenerateCodeFromMember_NullMember_ThrowsArgumentNullException()
        {
            CodeGeneratorTests generator = this;
            Assert.Throws<ArgumentNullException>("member", () => generator.GenerateCodeFromMember(null, new StringWriter(), new CodeGeneratorOptions()));
        }

        [Fact]
        [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
        public void GenerateCodeFromMember_NullWriter_ThrowsArgumentNullException()
        {
            CodeGeneratorTests generator = this;
            Assert.Throws<ArgumentNullException>("writer", () => generator.GenerateCodeFromMember(new CodeTypeMember(), null, new CodeGeneratorOptions()));
        }

        [Theory]
        [InlineData(null)]
        [InlineData("")]
        [InlineData("text")]
        public void GenerateCommentStatement_Invoke_CallsCorrectMethod(string text)
        {
            CodeGeneratorTests generator = this;
            var e = new CodeCommentStatement(text);
            int callCount = 0;
            generator.GenerateCommentAction = (actualComment) =>
            {
                Assert.Same(e.Comment, actualComment);
                callCount++;
            };
            generator.GenerateCommentStatement(e);
            Assert.Equal(1, callCount);
        }

        [Fact]
        [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")]
        public void GenerateCommentStatement_NullE_ThrowsArgumentNullException()
        {
            CodeGeneratorTests generator = this;
            Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCommentStatement(null));
        }

        [Fact]
        public void GenerateCommentStatement_NullEComment_ThrowsArgumentException()
        {
            CodeGeneratorTests generator = this;
            var e = new CodeCommentStatement();
            Assert.Throws<ArgumentException>("e", () => generator.GenerateCommentStatement(e));
        }

        [Theory]
        [InlineData(null)]
        [InlineData("")]
        [InlineData("text")]
        public void GenerateCommentStatements_InvokeNonEmpty_CallsCorrectMethod(string text)
        {
            CodeGeneratorTests generator = this;
            generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE);
            var e = new CodeCommentStatementCollection(new CodeCommentStatement[] { new CodeCommentStatement(text), new CodeCommentStatement("otherText") });
            int callCount = 0;
            generator.GenerateCommentAction = (actualComment) =>
            {
                Assert.Same(e[callCount].Comment, actualComment);
                callCount++;
            };
            generator.GenerateCommentStatements(e);
            Assert.Equal(2, callCount);
        }

        [Fact]
        public void GenerateCommentStatements_InvokeEmptyE_Nop()
        {
            CodeGeneratorTests generator = this;
            generator.GenerateCommentStatementsAction
= (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatements(new CodeCommentStatementCollection()); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCommentStatements_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCommentStatements(null)); } [Fact] public void GenerateCommentStatements_NullValueInE_ThrowsArgumentException() { CodeGeneratorTests generator = this; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeCommentStatementCollection(new CodeCommentStatement[] { new CodeCommentStatement() }); Assert.Throws<ArgumentException>("e", () => generator.GenerateCommentStatements(e)); } public static IEnumerable<object[]> GenerateCompileUnit_TestData() { yield return new object[] { new CodeCompileUnit() }; } [Theory] [MemberData(nameof(GenerateCompileUnit_TestData))] public void GenerateCompileUnit_InvokeWithOutput_Success(CodeCompileUnit e) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); int generateCompileUnitStartCallCount = 0; int generateCompileUnitEndCallCount = 0; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); Assert.Equal(0, generateCompileUnitEndCallCount); generateCompileUnitStartCallCount++; }; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); generateCompileUnitEndCallCount++; }; generator.GenerateCompileUnit(e); Assert.Equal(1, generateCompileUnitStartCallCount); Assert.Equal(1, generateCompileUnitEndCallCount); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateCompileUnit_InvokeWithDirectives_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeSnippetCompileUnit("value") { LinePragma = new CodeLinePragma() }; e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); int generateCompileUnitStartCallCount = 0; int generateCompileUnitEndCallCount = 0; int generateDirectivesCallCount = 0; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); Assert.Equal(0, generateCompileUnitEndCallCount); Assert.Equal(1, generateDirectivesCallCount); generateCompileUnitStartCallCount++; }; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); Assert.Equal(2, generateDirectivesCallCount); generateCompileUnitEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { Assert.Same(generateDirectivesCallCount == 0 ? e.StartDirectives : e.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? 
"StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateCompileUnit(e); Assert.Equal(1, generateCompileUnitStartCallCount); Assert.Equal(1, generateCompileUnitEndCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal("StartDirectives EndDirectives", writer.ToString()); }); } [Theory] [MemberData(nameof(GenerateCompileUnit_TestData))] public void GenerateCompileUnit_InvokeWithoutOutput_Success(CodeCompileUnit e) { CodeGeneratorTests generator = this; generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); int generateCompileUnitStartCallCount = 0; int generateCompileUnitEndCallCount = 0; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => { Assert.Same(e, actualE); Assert.Equal(0, generateCompileUnitEndCallCount); generateCompileUnitStartCallCount++; }; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => { Assert.Same(e, actualE); generateCompileUnitEndCallCount++; }; generator.GenerateCompileUnit(e); Assert.Equal(1, generateCompileUnitStartCallCount); Assert.Equal(1, generateCompileUnitEndCallCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCompileUnit_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCompileUnit(null)); } [Fact] public void GenerateCompileUnit_NullOutputWithNamespace_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); e.Namespaces.Add(new CodeNamespace("name")); generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateCompileUnit(e)); } [Fact] public void GenerateCompileUnitEnd_InvokeWithEndDirectives_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => baseMethod(actualE); int generateDirectivesCallCount = 0; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(e.EndDirectives, actualDirectives); generateDirectivesCallCount++; }; generator.GenerateCompileUnitEnd(e); Assert.Equal(1, generateDirectivesCallCount); } [Fact] public void GenerateCompileUnitEnd_InvokeWithoutEndDirectives_Nop() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitEnd(e); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCompileUnitEnd_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCompileUnitEndAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCompileUnitEnd(null)); } [Fact] public void GenerateCompileUnitStart_InvokeWithStartDirectives_CallsCorrectMethod() { CodeGeneratorTests generator = this; 
var e = new CodeCompileUnit(); e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); int generateDirectivesCallCount = 0; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(e.StartDirectives, actualDirectives); generateDirectivesCallCount++; }; generator.GenerateCompileUnitStart(e); Assert.Equal(1, generateDirectivesCallCount); } [Fact] public void GenerateCompileUnitStart_InvokeWithoutStartDirectives_Nop() { CodeGeneratorTests generator = this; var e = new CodeCompileUnit(); generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCompileUnitStart(e); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateCompileUnitStart_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateCompileUnitStartAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateCompileUnitStart(null)); } [Fact] public void GenerateDecimalValue_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDecimalValueAction = (actualS, baseMethod) => baseMethod(actualS); generator.GenerateDecimalValue(decimal.MaxValue); Assert.Equal("79228162514264337593543950335", writer.ToString()); }); } [Fact] public void GenerateDecimalValue_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; generator.GenerateDecimalValueAction = (actualS, baseMethod) => baseMethod(actualS); Assert.Throws<NullReferenceException>(() => generator.GenerateDecimalValue(1)); } public static IEnumerable<object[]> GenerateDefaultValueExpression_TestData() { yield return new object[] { null }; yield return new object[] { new CodeDefaultValueExpression() }; } [Theory] [MemberData(nameof(GenerateDefaultValueExpression_TestData))] public void GenerateDefaultValueExpression_InvokeWithOutput_Nop(CodeDefaultValueExpression e) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDefaultValueExpressionAction = (actualE, baseMethod) => baseMethod(e); generator.GenerateDefaultValueExpression(e); }); } [Theory] [MemberData(nameof(GenerateDefaultValueExpression_TestData))] public void GenerateDefaultValueExpression_InvokeWithoutOutput_Nop(CodeDefaultValueExpression e) { CodeGeneratorTests generator = this; generator.GenerateDefaultValueExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateDefaultValueExpression(e); } public static IEnumerable<object[]> GenerateDirectionExpression_TestData() { yield return new object[] { FieldDirection.In, "1" }; yield return new object[] { FieldDirection.Out, "out 1" }; yield return new object[] { FieldDirection.Ref, "ref 1" }; yield return new object[] { FieldDirection.In - 1, "1" }; yield return new object[] { FieldDirection.Ref + 1, "1" }; } [Theory] [MemberData(nameof(GenerateDirectionExpression_TestData))] public void GenerateDirectionExpression_Invoke_Success(FieldDirection direction, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeDirectionExpression(direction, new CodePrimitiveExpression(1)); 
generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputDirectionCallCount = 0; generator.OutputDirectionAction = (actualDirection, baseMethod) => { baseMethod(actualDirection); Assert.Equal(e.Direction, actualDirection); outputDirectionCallCount++; }; generator.GenerateDirectionExpression(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(1, outputDirectionCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateDirectionExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateDirectionExpression(null)); } [Fact] public void GenerateDirectionExpression_NullEExpression_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; var e = new CodeDirectionExpression(); generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateDirectionExpression(e)); } [Theory] [InlineData(FieldDirection.Out)] [InlineData(FieldDirection.Ref)] public void GenerateDirectionExpression_InvokeNonInWithoutWriter_ThrowsNullReferenceException(FieldDirection direction) { CodeGeneratorTests generator = this; var e = new CodeDirectionExpression(direction, new CodePrimitiveExpression(1)); generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateDirectionExpression(e)); } public static IEnumerable<object[]> GenerateDirectives_TestData() { yield return new object[] { null }; yield return new object[] { new CodeDirectiveCollection() }; yield return new object[] { new CodeDirectiveCollection(new CodeDirective[] { new CodeDirective() }) }; } [Theory] [MemberData(nameof(GenerateDirectives_TestData))] public void GenerateDirectives_InvokeWithOutput_Nop(CodeDirectiveCollection directives) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => baseMethod(actualDirectives); generator.GenerateDirectives(directives); }); } [Theory] [MemberData(nameof(GenerateDirectives_TestData))] public void GenerateDirectives_InvokeWithoutOutput_Nop(CodeDirectiveCollection directives) { CodeGeneratorTests generator = this; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => baseMethod(actualDirectives); generator.GenerateDirectives(directives); } [Fact] public void GenerateDoubleValue_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateDoubleValueAction = (actualS, baseMethod) => baseMethod(actualS); generator.GenerateDoubleValue(double.MaxValue); Assert.Equal("1.7976931348623157E+308", writer.ToString()); }); } [Fact] 
public void GenerateDoubleValue_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; generator.GenerateDoubleValueAction = (actualS, baseMethod) => baseMethod(actualS); Assert.Throws<NullReferenceException>(() => generator.GenerateDoubleValue(1)); } [Fact] public void GenerateExpression_CodeArgumentReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeArgumentReferenceExpression(); int callCount = 0; generator.GenerateArgumentReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeArrayCreateExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeArrayCreateExpression(); int callCount = 0; generator.GenerateArrayCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeArrayIndexerExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeArrayIndexerExpression(); int callCount = 0; generator.GenerateArrayIndexerExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeBaseReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeBaseReferenceExpression(); int callCount = 0; generator.GenerateBaseReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeCastExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeCastExpression(); int callCount = 0; generator.GenerateCastExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDefaultValueExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDefaultValueExpression(); int callCount = 0; generator.GenerateDefaultValueExpressionAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDelegateCreateExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDelegateCreateExpression(); int callCount = 0; generator.GenerateDelegateCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDelegateInvokeExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDelegateInvokeExpression(); int callCount = 0; generator.GenerateDelegateInvokeExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeDirectionExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeDirectionExpression(); int callCount = 0; generator.GenerateDirectionExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void 
GenerateExpression_CodeEventReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeEventReferenceExpression(); int callCount = 0; generator.GenerateEventReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeFieldReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeFieldReferenceExpression(); int callCount = 0; generator.GenerateFieldReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeIndexerExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeIndexerExpression(); int callCount = 0; generator.GenerateIndexerExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeMethodInvokeExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeMethodInvokeExpression(); int callCount = 0; generator.GenerateMethodInvokeExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeMethodReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeMethodReferenceExpression(); int callCount = 0; generator.GenerateMethodReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeObjectCreateExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeObjectCreateExpression(); int callCount = 0; generator.GenerateObjectCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeParameterDeclarationExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeParameterDeclarationExpression(); int callCount = 0; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodePrimitiveExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression(); int callCount = 0; generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodePropertyReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodePropertyReferenceExpression(); int callCount = 0; generator.GeneratePropertyReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodePropertySetValueReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodePropertySetValueReferenceExpression(); int callCount = 0; generator.GeneratePropertySetValueReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; 
}; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeSnippetExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeSnippetExpression(); int callCount = 0; generator.GenerateSnippetExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeThisReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeThisReferenceExpression(); int callCount = 0; generator.GenerateThisReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeTypeOfExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeTypeOfExpression(); int callCount = 0; generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateExpression_CodeTypeReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeTypeReferenceExpression(); int callCount = 0; generator.GenerateTypeReferenceExpressionAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] public void GenerateVariableReferenceExpression_CodeVariableReferenceExpression_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeVariableReferenceExpression(); int callCount = 0; generator.GenerateVariableReferenceExpressionAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateExpression(e); Assert.Equal(1, callCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateExpression(null)); } public static IEnumerable<object[]> GenerateExpression_InvalidE_TestData() { yield return new object[] { new CodeExpression() }; yield return new object[] { new CustomCodeExpression() }; } [Theory] [MemberData(nameof(GenerateExpression_InvalidE_TestData))] public void GenerateExpression_InvalidE_ThrowsArgumentException(CodeExpression e) { CodeGeneratorTests generator = this; Assert.Throws<ArgumentException>("e", () => generator.GenerateExpression(e)); } [Fact] public void GenerateNamespace_InvokeEmpty_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); int generateNamespaceStartCallCount = 0; int generateNamespaceEndCallCount = 0; generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generateNamespaceEndCallCount); writer.Write("NamespaceStart "); generateNamespaceStartCallCount++; }; generator.GenerateNamespaceEndAction = (actualE) => { Assert.Same(e, actualE); writer.Write("NamespaceEnd"); generateNamespaceEndCallCount++; }; generator.GenerateNamespace(e); Assert.Equal(1, generateNamespaceStartCallCount); Assert.Equal(1, generateNamespaceEndCallCount); Assert.Equal($"NamespaceStart {Environment.NewLine}NamespaceEnd", writer.ToString()); }); } [Fact] public void 
GenerateNamespace_InvokeWithComments_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Comments.Add(new CodeCommentStatement("Comment")); e.Comments.Add(new CodeCommentStatement("Comment")); int generateCommentStatementsCallCount = 0; int generateCommentCallCount = 0; int generateNamespaceStartCallCount = 0; int generateNamespaceEndCallCount = 0; generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(e.Comments, actualE); Assert.Equal(0, generateNamespaceStartCallCount); Assert.Equal(0, generateNamespaceEndCallCount); writer.Write("Comments "); generateCommentStatementsCallCount++; }; generator.GenerateCommentAction = (actualE) => { Assert.Same(e.Comments[generateCommentCallCount].Comment, actualE); writer.Write("Comment "); generateCommentCallCount++; }; generator.GenerateNamespaceStartAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generateNamespaceEndCallCount); writer.Write("NamespaceStart "); generateNamespaceStartCallCount++; }; generator.GenerateNamespaceEndAction = (actualE) => { Assert.Same(e, actualE); writer.Write("NamespaceEnd"); generateNamespaceEndCallCount++; }; generator.GenerateNamespace(e); Assert.Equal(1, generateCommentStatementsCallCount); Assert.Equal(2, generateCommentCallCount); Assert.Equal(1, generateNamespaceStartCallCount); Assert.Equal(1, generateNamespaceEndCallCount); Assert.Equal($"Comment Comment Comments NamespaceStart {Environment.NewLine}NamespaceEnd", writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateNamespace_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateNamespace(null)); } [Fact] public void GenerateNamespace_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => {}; generator.GenerateNamespaceEndAction = (actualE) => {}; Assert.Throws<NullReferenceException>(() => generator.GenerateNamespace(e)); } [Fact] public void GenerateNamespace_NullValueInE_ThrowsArgumentException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Comments.Add(new CodeCommentStatement()); generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => {}; generator.GenerateNamespaceEndAction = (actualE) => {}; Assert.Throws<ArgumentException>("e", () => generator.GenerateNamespace(e)); }); } [Fact] public void GenerateNamespaceImports_InvokeEmptyWithOutput_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => generateNamespaceCallCount++; generator.GenerateNamespaceImports(e); Assert.Equal(0, generateNamespaceCallCount); Assert.Empty(writer.ToString()); }); } 
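        // The GenerateNamespaceImports tests below pin down the generator's call ordering: an
        // import that carries a LinePragma is bracketed as
        //
        //     GenerateLinePragmaStart(import.LinePragma);
        //     GenerateNamespaceImport(import);
        //     GenerateLinePragmaEnd(import.LinePragma);
        //
        // while an import without a pragma gets only the GenerateNamespaceImport call.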
[Fact] public void GenerateNamespaceImports_InvokeNonEmptyWithOutput_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Imports.Add(new CodeNamespaceImport("Namespace1")); e.Imports.Add(new CodeNamespaceImport("Namespace2")); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => { Assert.Same(e.Imports[generateNamespaceCallCount], actualE); generateNamespaceCallCount++; }; generator.GenerateNamespaceImports(e); Assert.Equal(2, generateNamespaceCallCount); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateNamespaceImports_InvokeWithOutputWithLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Imports.Add(new CodeNamespaceImport("Namespace1") { LinePragma = new CodeLinePragma() }); e.Imports.Add(new CodeNamespaceImport("Namespace2") { LinePragma = new CodeLinePragma() }); int generateLinePragmaStartCallCount = 0; int generateNamespaceCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(e.Imports[generateLinePragmaEndCallCount].LinePragma, actualE); Assert.Equal(generateLinePragmaStartCallCount, generateNamespaceCallCount); Assert.Equal(generateLinePragmaStartCallCount, generateLinePragmaEndCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateNamespaceImportAction = (actualE) => { Assert.Same(e.Imports[generateLinePragmaEndCallCount], actualE); Assert.Equal(generateNamespaceCallCount, generateLinePragmaEndCallCount); writer.Write("Namespace "); generateNamespaceCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(e.Imports[generateLinePragmaEndCallCount].LinePragma, actualE); writer.Write("LinePragmaEnd"); generateLinePragmaEndCallCount++; }; generator.GenerateNamespaceImports(e); Assert.Equal(2, generateLinePragmaStartCallCount); Assert.Equal(2, generateNamespaceCallCount); Assert.Equal(2, generateLinePragmaEndCallCount); Assert.Equal("LinePragmaStart Namespace LinePragmaEndLinePragmaStart Namespace LinePragmaEnd", writer.ToString()); }); } [Fact] public void GenerateNamespaceImports_InvokeEmptyWithoutOutput_Success() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => generateNamespaceCallCount++; generator.GenerateNamespaceImports(e); Assert.Equal(0, generateNamespaceCallCount); } [Fact] public void GenerateNamespaceImports_InvokeWithoutOutput_Success() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); e.Imports.Add(new CodeNamespaceImport("Namespace1")); e.Imports.Add(new CodeNamespaceImport("Namespace2")); int generateNamespaceCallCount = 0; generator.GenerateNamespaceImportAction = (actualE) => { Assert.Same(e.Imports[generateNamespaceCallCount], actualE); generateNamespaceCallCount++; }; generator.GenerateNamespaceImports(e); Assert.Equal(2, generateNamespaceCallCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateNamespaceImports_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateNamespaceImports(null)); } public static IEnumerable<object[]> GenerateParameterDeclarationExpression_TestData() { yield return new object[] { null, null, FieldDirection.In, "Type " }; yield 
return new object[] { string.Empty, string.Empty, FieldDirection.In, "Type " }; yield return new object[] { "type", "name", FieldDirection.In, "Type name" }; yield return new object[] { null, null, FieldDirection.Out, "out Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.Out, "out Type " }; yield return new object[] { "type", "name", FieldDirection.Out, "out Type name" }; yield return new object[] { null, null, FieldDirection.Ref, "ref Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.Ref, "ref Type " }; yield return new object[] { "type", "name", FieldDirection.Ref, "ref Type name" }; yield return new object[] { null, null, FieldDirection.In - 1, "Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.In - 1, "Type " }; yield return new object[] { "type", "name", FieldDirection.In - 1, "Type name" }; yield return new object[] { null, null, FieldDirection.Ref + 1, "Type " }; yield return new object[] { string.Empty, string.Empty, FieldDirection.Ref + 1, "Type " }; yield return new object[] { "type", "name", FieldDirection.Ref + 1, "Type name" }; } [Theory] [MemberData(nameof(GenerateParameterDeclarationExpression_TestData))] public void GenerateParameterDeclarationExpression_Invoke_Success(string type, string name, FieldDirection direction, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeParameterDeclarationExpression(type, name) { Direction = direction }; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputDirectionCallCount = 0; int outputTypeNamePairCallCount = 0; int outputTypeCallCount = 0; generator.OutputDirectionAction = (actualDirection, baseMethod) => { baseMethod(actualDirection); Assert.Equal(e.Direction, actualDirection); Assert.Equal(0, outputTypeNamePairCallCount); Assert.Equal(0, outputTypeCallCount); outputDirectionCallCount++; }; generator.OutputTypeNamePairAction = (actualType, actualName, baseMethod) => { baseMethod(actualType, actualName); Assert.Same(e.Type, actualType); Assert.Same(e.Name, actualName); outputTypeNamePairCallCount++; }; generator.OutputTypeAction = (actualType) => { Assert.Same(e.Type, actualType); writer.Write("Type"); outputTypeCallCount++; }; generator.OutputIdentifierAction = (actualIdent, baseMethod) => baseMethod(actualIdent); generator.GenerateParameterDeclarationExpression(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(1, outputDirectionCallCount); Assert.Equal(1, outputTypeNamePairCallCount); Assert.Equal(1, outputTypeCallCount); }); } [Fact] public void GenerateParameterDeclarationExpression_InvokeWithCustomAttributes_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeParameterDeclarationExpression("Type", "Name") { Direction = FieldDirection.Ref }; e.CustomAttributes.Add(new CodeAttributeDeclaration("name")); e.CustomAttributes.Add(new CodeAttributeDeclaration("name")); generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputAttributeDeclarationsCallCount = 0; int outputDirectionCallCount = 0; int outputTypeNamePairCallCount = 0; int outputTypeCallCount = 0; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => { baseMethod(actualAttributes); Assert.Same(e.CustomAttributes, actualAttributes); Assert.Equal(0, outputDirectionCallCount); Assert.Equal(0, outputTypeNamePairCallCount); 
Assert.Equal(0, outputTypeCallCount); outputAttributeDeclarationsCallCount++; }; generator.GenerateAttributeDeclarationsStartAction = (actualArg) => writer.Write("StartAttributes "); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GenerateAttributeDeclarationsEndAction = (actualArg) => writer.Write(" EndAttributes"); generator.OutputDirectionAction = (actualDirection, baseMethod) => { baseMethod(actualDirection); Assert.Equal(e.Direction, actualDirection); Assert.Equal(0, outputTypeNamePairCallCount); Assert.Equal(0, outputTypeCallCount); outputDirectionCallCount++; }; generator.OutputTypeNamePairAction = (actualType, actualName, baseMethod) => { baseMethod(actualType, actualName); Assert.Same(e.Type, actualType); Assert.Same(e.Name, actualName); outputTypeNamePairCallCount++; }; generator.OutputTypeAction = (actualType) => { Assert.Same(e.Type, actualType); writer.Write("Type"); outputTypeCallCount++; }; generator.OutputIdentifierAction = (actualIdent, baseMethod) => baseMethod(actualIdent); generator.GenerateParameterDeclarationExpression(e); Assert.Equal($"StartAttributes name(), {Environment.NewLine}name() EndAttributes ref Type Name", writer.ToString()); Assert.Equal(1, outputAttributeDeclarationsCallCount); Assert.Equal(1, outputDirectionCallCount); Assert.Equal(1, outputTypeNamePairCallCount); Assert.Equal(1, outputTypeCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateParameterDeclarationExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateParameterDeclarationExpression(null)); } [Fact] public void GenerateParameterDeclarationExpression_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeParameterDeclarationExpression(); generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputTypeNamePairAction = (actualType, actualName, baseMethod) => baseMethod(actualType, actualName); generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputTypeAction = (actualType) => { }; Assert.Throws<NullReferenceException>(() => generator.GenerateParameterDeclarationExpression(e)); } public static IEnumerable<object[]> GeneratePrimitiveExpression_TestData() { yield return new object[] { null, "NullToken" }; yield return new object[] { 'a', "'a'" }; yield return new object[] { (short)1, "1" }; yield return new object[] { 1, "1" }; yield return new object[] { (long)1, "1" }; yield return new object[] { (byte)1, "1" }; yield return new object[] { true, "true" }; yield return new object[] { false, "false" }; } [Theory] [MemberData(nameof(GeneratePrimitiveExpression_TestData))] public void GeneratePrimitiveExpression_Invoke_Success(object value, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodePrimitiveExpression(value); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GeneratePrimitiveExpression(e); Assert.Equal(expected, writer.ToString()); }); } [Fact] public void GeneratePrimitiveExpression_InvokeFloat_Success() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression((float)1); 
generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int generateSingleFloatValueCallCount = 0; generator.GenerateSingleFloatValueAction = (actualValue, baseMethod) => { Assert.Equal((float)1, actualValue); generateSingleFloatValueCallCount++; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(1, generateSingleFloatValueCallCount); } [Fact] public void GeneratePrimitiveExpression_InvokeDouble_Success() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression((double)1); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int generateDoubleValueCallCount = 0; generator.GenerateDoubleValueAction = (actualValue, baseMethod) => { Assert.Equal((double)1, actualValue); generateDoubleValueCallCount++; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(1, generateDoubleValueCallCount); } [Fact] public void GeneratePrimitiveExpression_InvokeDecimal_Success() { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression((decimal)1); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int generateDecimalValueCallCount = 0; generator.GenerateDecimalValueAction = (actualValue, baseMethod) => { Assert.Equal((decimal)1, actualValue); generateDecimalValueCallCount++; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(1, generateDecimalValueCallCount); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("result", "result")] public void GeneratePrimitiveExpression_InvokeString_Success(string result, string expected) { CodeGeneratorTests generator = this; generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); PerformActionWithOutput(writer => { var e = new CodePrimitiveExpression("value"); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int quoteSnippetCallCount = 0; generator.QuoteSnippetStringAction = (actualValue) => { Assert.Equal("value", actualValue); quoteSnippetCallCount++; return result; }; generator.GeneratePrimitiveExpression(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(1, quoteSnippetCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GeneratePrimitiveExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GeneratePrimitiveExpression(null)); } public static IEnumerable<object[]> GeneratePrimitiveExpression_InvalidEValue_TestData() { yield return new object[] { new object() }; yield return new object[] { DBNull.Value }; yield return new object[] { new DateTime() }; yield return new object[] { (sbyte)1 }; yield return new object[] { (ushort)1 }; yield return new object[] { (uint)1 }; yield return new object[] { (ulong)1 }; } [Theory] [MemberData(nameof(GeneratePrimitiveExpression_InvalidEValue_TestData))] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GeneratePrimitiveExpression_InvalidE_ThrowsArgumentException(object value) { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression(value); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentException>("e", () => generator.GeneratePrimitiveExpression(e)); } public static IEnumerable<object[]> 
GeneratePrimitiveExpression_WithoutOutput_TestData() { yield return new object[] { null }; yield return new object[] { "" }; yield return new object[] { "value" }; yield return new object[] { 'a' }; yield return new object[] { (short)1 }; yield return new object[] { 1 }; yield return new object[] { (long)1 }; yield return new object[] { (byte)1 }; yield return new object[] { (float)1 }; yield return new object[] { (double)1 }; yield return new object[] { (decimal)1 }; yield return new object[] { true }; yield return new object[] { false }; } [Theory] [MemberData(nameof(GeneratePrimitiveExpression_WithoutOutput_TestData))] public void GeneratePrimitiveExpression_InvokeWithoutOutput_ThrowsNullReferenceException(object value) { CodeGeneratorTests generator = this; var e = new CodePrimitiveExpression(value); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateSingleFloatValueAction = (actualS, baseMethod) => baseMethod(actualS); generator.GenerateDoubleValueAction = (actualD, baseMethod) => baseMethod(actualD); generator.GenerateDecimalValueAction = (actualD, baseMethod) => baseMethod(actualD); generator.QuoteSnippetStringAction = (actualValue) => actualValue; Assert.Throws<NullReferenceException>(() => generator.GeneratePrimitiveExpression(e)); } [Fact] public void GenerateSingleFloatValue_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateSingleFloatValueAction = (actualS, baseMethod) => baseMethod(actualS); generator.GenerateSingleFloatValue(float.MaxValue); Assert.Equal(float.MaxValue.ToString("R", CultureInfo.InvariantCulture.NumberFormat), writer.ToString()); }); } [Fact] public void GenerateSingleFloatValue_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; generator.GenerateSingleFloatValueAction = (actualS, baseMethod) => baseMethod(actualS); Assert.Throws<NullReferenceException>(() => generator.GenerateSingleFloatValue(1)); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("value", "value")] public void GenerateSnippetCompileUnit_Invoke_Success(string value, string expected) { CodeGeneratorTests generator = this; var e = new CodeSnippetCompileUnit(value); PerformActionWithOutput(writer => { generator.GenerateSnippetCompileUnit(e); Assert.Equal(expected + Environment.NewLine, writer.ToString()); }); } [Fact] public void GenerateSnippetCompileUnit_InvokeWithDirectivesAndLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeSnippetCompileUnit("value") { LinePragma = new CodeLinePragma() }; e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); int generateLinePragmaStartCallCount = 0; int generateDirectivesCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(e.LinePragma, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(e.LinePragma, actualE); Assert.Equal(1, generateDirectivesCallCount);
writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? e.StartDirectives : e.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? "StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateSnippetCompileUnit(e); Assert.Equal($"StartDirectives LinePragmaStart value{Environment.NewLine}LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateSnippetCompileUnit_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateSnippetCompileUnit(null)); } [Fact] public void GenerateSnippetCompileUnit_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeSnippetCompileUnit(); Assert.Throws<NullReferenceException>(() => generator.GenerateSnippetCompileUnit(e)); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("value", "value")] public void GenerateSnippetStatement_Invoke_Success(string value, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.GenerateSnippetStatementAction = (actualE, baseMethod) => baseMethod(actualE); var e = new CodeSnippetStatement(value); generator.GenerateSnippetStatement(e); Assert.Equal(expected + Environment.NewLine, writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateSnippetStatement_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateSnippetStatementAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateSnippetStatement(null)); } [Fact] public void GenerateStatement_InvokeWithDirectivesAndLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeGotoStatement { LinePragma = new CodeLinePragma() }; e.StartDirectives.Add(new CodeDirective()); e.StartDirectives.Add(new CodeChecksumPragma()); e.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); e.EndDirectives.Add(new CodeDirective()); e.EndDirectives.Add(new CodeChecksumPragma()); e.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); int generateLinePragmaStartCallCount = 0; int generateDirectivesCallCount = 0; int generateStatementCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(e.LinePragma, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateStatementCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateGotoStatementAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(1, generateDirectivesCallCount); writer.Write("Statement "); generateStatementCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(e.LinePragma, actualE); Assert.Equal(1, 
generateDirectivesCallCount); writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? e.StartDirectives : e.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? "StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateStatement(e); Assert.Equal($"StartDirectives LinePragmaStart Statement LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(1, generateStatementCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); }); } [Fact] public void GenerateStatement_CodeAssignStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeAssignStatement(); int callCount = 0; generator.GenerateAssignStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeAttachEventStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeAttachEventStatement(); int callCount = 0; generator.GenerateAttachEventStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeConditionStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeConditionStatement(); int callCount = 0; generator.GenerateConditionStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeExpressionStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeExpressionStatement(); int callCount = 0; generator.GenerateExpressionStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeGotoStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeGotoStatement(); int callCount = 0; generator.GenerateGotoStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeIterationStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeIterationStatement(); int callCount = 0; generator.GenerateIterationStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeLabeledStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeLabeledStatement(); int callCount = 0; generator.GenerateLabeledStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeMethodReturnStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeMethodReturnStatement(); int callCount = 0; generator.GenerateMethodReturnStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeRemoveEventStatement_CallsCorrectMethod() { 
CodeGeneratorTests generator = this; var e = new CodeRemoveEventStatement(); int callCount = 0; generator.GenerateRemoveEventStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeSnippetStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.Indent = 1; var e = new CodeSnippetStatement(); int callCount = 0; generator.GenerateSnippetStatementAction = (actualE, baseMethod) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); Assert.Equal(1, generator.Indent); }); } [Fact] public void GenerateStatement_CodeThrowExceptionStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeThrowExceptionStatement(); int callCount = 0; generator.GenerateThrowExceptionStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeTryCatchFinallyStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeTryCatchFinallyStatement(); int callCount = 0; generator.GenerateTryCatchFinallyStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] public void GenerateStatement_CodeVariableDeclarationStatement_CallsCorrectMethod() { CodeGeneratorTests generator = this; var e = new CodeVariableDeclarationStatement(); int callCount = 0; generator.GenerateVariableDeclarationStatementAction = (actualE) => { Assert.Same(e, actualE); callCount++; }; generator.GenerateStatement(e); Assert.Equal(1, callCount); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateStatement_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateStatement(null)); } public static IEnumerable<object[]> GenerateStatement_InvalidE_TestData() { yield return new object[] { new CodeStatement() }; yield return new object[] { new CustomCodeStatement() }; } [Theory] [MemberData(nameof(GenerateStatement_InvalidE_TestData))] public void GenerateStatement_InvalidE_ThrowsArgumentException(CodeStatement e) { CodeGeneratorTests generator = this; Assert.Throws<ArgumentException>("e", () => generator.GenerateStatement(e)); } [Fact] public void GenerateStatement_CodeSnippetStatementWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeSnippetStatement(); generator.GenerateSnippetStatementAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateStatement(e)); } [Fact] public void GenerateStatements_InvokeWithWriter_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var stmts = new CodeStatementCollection(new CodeStatement[] { new CodeGotoStatement(), new CodeGotoStatement() }); int generateStatementCallCount = 0; generator.GenerateGotoStatementAction = (actualE) => { Assert.Same(stmts[generateStatementCallCount], actualE); generateStatementCallCount++; }; generator.GenerateStatements(stmts); Assert.Equal(2, generateStatementCallCount); }); } [Fact] public void GenerateStatements_InvokeEmptyStatementsWithWriter_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var stmts
= new CodeStatementCollection(); generator.GenerateStatements(stmts); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateStatements_InvokeEmptyStatementsWithoutWriter_Nop() { CodeGeneratorTests generator = this; var stmts = new CodeStatementCollection(); generator.GenerateStatements(stmts); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateStatements_NullStmts_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("stmts", () => generator.GenerateStatements(null)); } [Fact] public void GenerateStatements_InvalidStatementInStmts_ThrowsArgumentException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var stmts = new CodeStatementCollection(new CodeStatement[] { new CodeStatement() }); Assert.Throws<ArgumentException>("e", () => generator.GenerateStatements(stmts)); }); } [Fact] public void GenerateStatements_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var stmts = new CodeStatementCollection(new CodeStatement[] { new CodeStatement() }); Assert.Throws<NullReferenceException>(() => generator.GenerateStatements(stmts)); } [Fact] public void GenerateTypeOfExpression_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeTypeOfExpression(new CodeTypeReference()); generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputTypeCallCount = 0; generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(e.Type, actualTypeRef); writer.Write("Type"); outputTypeCallCount++; }; generator.GenerateTypeOfExpression(e); Assert.Equal("typeof(Type)", writer.ToString()); Assert.Equal(1, outputTypeCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateTypeOfExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateTypeOfExpression(null)); } [Fact] public void GenerateTypeOfExpression_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var e = new CodeTypeOfExpression(); generator.GenerateTypeOfExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.GenerateTypeOfExpression(e)); } [Fact] public void GenerateTypeReferenceExpression_Invoke_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeTypeReferenceExpression(new CodeTypeReference()); generator.GenerateTypeReferenceExpressionAction = (actualE, baseMethod) => baseMethod(actualE); int outputTypeCallCount = 0; generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(e.Type, actualTypeRef); writer.Write("Type"); outputTypeCallCount++; }; generator.GenerateTypeReferenceExpression(e); Assert.Equal("Type", writer.ToString()); Assert.Equal(1, outputTypeCallCount); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateTypeReferenceExpression_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.GenerateTypeReferenceExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.GenerateTypeReferenceExpression(null)); } 
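// The GenerateTypes tests below verify that each CodeTypeDeclaration in a namespace is bracketed by GenerateTypeStart/GenerateTypeEnd, and that generation updates the CurrentClass, CurrentTypeName, and IsCurrent* state for classes, enums, interfaces, structs, and delegates.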
public static IEnumerable<object[]> GenerateTypes_TestData() { yield return new object[] { new CodeTypeDeclaration(), null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration(), new CodeGeneratorOptions(), $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration(), new CodeGeneratorOptions { BlankLinesBetweenMembers = false }, $"TypeStart TypeEndTypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsClass = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsEnum = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsInterface = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; yield return new object[] { new CodeTypeDeclaration("name") { IsStruct = true }, null, $"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd" }; } [Theory] [MemberData(nameof(GenerateTypes_TestData))] public void GenerateTypes_InvokeClassWithWriter_Success(CodeTypeDeclaration type, CodeGeneratorOptions options, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Types.Add(new CodeTypeDeclaration()); e.Types.Add(type); int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(e.Types[generateTypeStartCallCount], actualE); Assert.Equal(generateTypeStartCallCount, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(e.Types[generateTypeEndCallCount], actualE); writer.Write("TypeEnd"); generateTypeEndCallCount++; }; generator.GenerateTypes(e); Assert.Equal(expected, writer.ToString()); Assert.Equal(2, generateTypeStartCallCount); Assert.Equal(2, generateTypeEndCallCount); Assert.Same(e.Types[1], generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Same(e.Types[1].Name, generator.CurrentTypeName); Assert.Equal(e.Types[1].IsClass, generator.IsCurrentClass); Assert.False(generator.IsCurrentDelegate); Assert.Equal(e.Types[1].IsEnum, generator.IsCurrentEnum); Assert.Equal(e.Types[1].IsInterface, generator.IsCurrentInterface); Assert.Equal(e.Types[1].IsStruct, generator.IsCurrentStruct); }, options); } [Fact] public void GenerateTypes_InvokeDelegateWithWriter_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); e.Types.Add(new CodeTypeDeclaration()); e.Types.Add(new CodeTypeDelegate("name")); int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(e.Types[generateTypeStartCallCount], actualE); Assert.Equal(generateTypeStartCallCount, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(e.Types[generateTypeEndCallCount], actualE); writer.Write("TypeEnd"); generateTypeEndCallCount++; }; generator.GenerateTypes(e); Assert.Equal($"{Environment.NewLine}TypeStart TypeEnd{Environment.NewLine}TypeStart TypeEnd", 
writer.ToString()); Assert.Equal(2, generateTypeStartCallCount); Assert.Equal(2, generateTypeEndCallCount); Assert.Same(e.Types[1], generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Same(e.Types[1].Name, generator.CurrentTypeName); Assert.False(generator.IsCurrentClass); Assert.True(generator.IsCurrentDelegate); Assert.False(generator.IsCurrentEnum); Assert.False(generator.IsCurrentInterface); Assert.False(generator.IsCurrentStruct); }); } [Fact] public void GenerateTypes_InvokeWithCommentsDirectivesAndLinePragma_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var type = new CodeTypeDeclaration { LinePragma = new CodeLinePragma() }; type.Comments.Add(new CodeCommentStatement("Comment")); type.Comments.Add(new CodeCommentStatement("Comment")); type.StartDirectives.Add(new CodeDirective()); type.StartDirectives.Add(new CodeChecksumPragma()); type.StartDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); type.EndDirectives.Add(new CodeDirective()); type.EndDirectives.Add(new CodeChecksumPragma()); type.EndDirectives.Add(new CodeChecksumPragma("fileName", Guid.NewGuid(), new byte[0])); var e = new CodeNamespace(); e.Types.Add(type); int generateCommentStatementsCallCount = 0; int generateCommentCallCount = 0; int generateLinePragmaStartCallCount = 0; int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; int generateDirectivesCallCount = 0; int generateLinePragmaEndCallCount = 0; generator.GenerateCommentStatementsAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(type.Comments, actualE); writer.Write("Comments "); Assert.Equal(0, generateLinePragmaStartCallCount); Assert.Equal(0, generateTypeStartCallCount); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateTypeEndCallCount); generateCommentStatementsCallCount++; }; generator.GenerateCommentAction = (actualE) => { Assert.Same(type.Comments[generateCommentCallCount].Comment, actualE); writer.Write("Comment "); generateCommentCallCount++; }; generator.GenerateLinePragmaStartAction = (actualE) => { Assert.Same(type.LinePragma, actualE); Assert.Equal(0, generateTypeStartCallCount); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateTypeEndCallCount); writer.Write("LinePragmaStart "); generateLinePragmaStartCallCount++; }; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(type, actualE); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); Assert.Equal(0, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(type, actualE); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(0, generateLinePragmaEndCallCount); writer.Write("TypeEnd "); generateTypeEndCallCount++; }; generator.GenerateLinePragmaEndAction = (actualE) => { Assert.Same(type.LinePragma, actualE); Assert.Equal(1, generateDirectivesCallCount); Assert.Equal(1, generateTypeEndCallCount); writer.Write("LinePragmaEnd "); generateLinePragmaEndCallCount++; }; generator.GenerateDirectivesAction = (actualDirectives, baseMethod) => { baseMethod(actualDirectives); Assert.Same(generateDirectivesCallCount == 0 ? type.StartDirectives : type.EndDirectives, actualDirectives); writer.Write(generateDirectivesCallCount == 0 ? 
"StartDirectives " : "EndDirectives"); generateDirectivesCallCount++; }; generator.GenerateTypes(e); Assert.Equal($"{Environment.NewLine}StartDirectives Comment Comment Comments LinePragmaStart TypeStart TypeEnd LinePragmaEnd EndDirectives", writer.ToString()); Assert.Equal(1, generateCommentStatementsCallCount); Assert.Equal(2, generateCommentCallCount); Assert.Equal(1, generateLinePragmaStartCallCount); Assert.Equal(1, generateTypeStartCallCount); Assert.Equal(1, generateTypeEndCallCount); Assert.Equal(1, generateLinePragmaEndCallCount); Assert.Equal(2, generateDirectivesCallCount); Assert.Same(type, generator.CurrentClass); Assert.Null(generator.CurrentMember); Assert.Equal("<% unknown %>", generator.CurrentMemberName); Assert.Same(type.Name, generator.CurrentTypeName); Assert.Equal(type.IsClass, generator.IsCurrentClass); Assert.False(generator.IsCurrentDelegate); Assert.Equal(type.IsEnum, generator.IsCurrentEnum); Assert.Equal(type.IsInterface, generator.IsCurrentInterface); Assert.Equal(type.IsStruct, generator.IsCurrentStruct); }); } [Fact] public void GenerateTypes_InvokeWithMembers_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var type = new CodeTypeDeclaration(); type.Members.Add(new CodeTypeMember()); var e = new CodeNamespace(); e.Types.Add(type); int generateTypeStartCallCount = 0; int generateTypeEndCallCount = 0; generator.GenerateTypeStartAction = (actualE) => { Assert.Same(type, actualE); Assert.Equal(0, generateTypeEndCallCount); writer.Write("TypeStart "); generateTypeStartCallCount++; }; generator.GenerateTypeEndAction = (actualE) => { Assert.Same(type, actualE); writer.Write("TypeEnd"); generateTypeEndCallCount++; }; generator.GenerateTypes(e); Assert.Equal($"{Environment.NewLine}TypeStart TypeEnd", writer.ToString()); Assert.Equal(1, generateTypeStartCallCount); Assert.Equal(1, generateTypeEndCallCount); Assert.Same(type, generator.CurrentClass); }); } [Fact] public void GenerateTypes_InvokeEmpty_Nop() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var e = new CodeNamespace(); generator.GenerateTypes(e); Assert.Empty(writer.ToString()); }); } [Fact] public void GenerateTypes_InvokeEmptyEWithoutWriter_Nop() { CodeGeneratorTests generator = this; var e = new CodeNamespace(); generator.GenerateTypes(e); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void GenerateTypes_NullE_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; Assert.Throws<ArgumentNullException>("e", () => generator.GenerateTypes(null)); } [Fact] public void GenerateTypes_NullValueInE_ThrowsArgumentException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var type = new CodeTypeDeclaration(); type.Comments.Add(new CodeCommentStatement()); var e = new CodeNamespace(); e.Types.Add(type); generator.GenerateNamespaceAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateCommentStatementsAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateNamespaceStartAction = (actualE) => {}; generator.GenerateNamespaceEndAction = (actualE) => {}; Assert.Throws<ArgumentException>("e", () => generator.GenerateTypes(e)); }); } [Theory] [InlineData(null, false)] [InlineData("", false)] [InlineData(" ", false)] [InlineData("a", true)] [InlineData("A", true)] [InlineData("\u01C5", true)] [InlineData("\u02B0", true)] [InlineData("\u2163", true)] [InlineData("\u0620", true)] [InlineData("_", true)] 
[InlineData("_aA\u01C5\u02B0\u2163\u0620_0", true)] [InlineData("aA\u01C5\u02B0\u2163\u0620_0", true)] [InlineData(" ", false)] [InlineData("a ", false)] [InlineData("#", false)] [InlineData("a#", false)] [InlineData("\u0300", false)] [InlineData("a\u0300", true)] [InlineData("\u0903", false)] [InlineData("a\u0903", true)] [InlineData("\u203F", false)] [InlineData("a\u203F", true)] [InlineData("0", false)] [InlineData("1", false)] [InlineData(":", false)] [InlineData(".", false)] [InlineData("$", false)] [InlineData("+", false)] [InlineData("<", false)] [InlineData(">", false)] [InlineData("-", false)] [InlineData("[", false)] [InlineData("]", false)] [InlineData(",", false)] [InlineData("&", false)] [InlineData("*", false)] [InlineData("`", false)] [InlineData("a0", true)] [InlineData("a1", true)] [InlineData("a:", false)] [InlineData("a.", false)] [InlineData("a$", false)] [InlineData("a+", false)] [InlineData("a<", false)] [InlineData("a>", false)] [InlineData("a-", false)] [InlineData("a[", false)] [InlineData("a]", false)] [InlineData("a,", false)] [InlineData("a&", false)] [InlineData("a*", false)] [InlineData("\0", false)] [InlineData("a\0", false)] [InlineData("\r", false)] [InlineData("a\r", false)] [InlineData("\n", false)] [InlineData("a\n", false)] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void IsValidLanguageIndependentIdentifier_Invoke_ReturnsExpected(string value, bool expected) { Assert.Equal(expected, CodeGenerator.IsValidLanguageIndependentIdentifier(value)); } [Theory] [InlineData(null, "1")] [InlineData("", "1")] [InlineData("name", "name=1")] public void OutputAttributeArgument_Invoke_Success(string name, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var arg = new CodeAttributeArgument(name, new CodePrimitiveExpression(1)); generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.OutputAttributeArgument(arg); Assert.Equal(expected, writer.ToString()); }); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void OutputAttributeArgument_NullArg_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("arg", () => generator.OutputAttributeArgument(null)); } [Fact] public void OutputAttributeArgument_NullArgValue_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var arg = new CodeAttributeArgument(); generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<ArgumentNullException>("e", () => generator.OutputAttributeArgument(arg)); }); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("name")] public void 
OutputAttributeArgument_InvokeNonNullNameWithoutOutput_ThrowsNullReferenceException(string name) { CodeGeneratorTests generator = this; var arg = new CodeAttributeArgument(name, new CodePrimitiveExpression(1)); generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); Assert.Throws<NullReferenceException>(() => generator.OutputAttributeArgument(arg)); } [Fact] public void OutputAttributeDeclarations_NonEmptyAttributes_Success() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var attributes = new CodeAttributeDeclarationCollection(new CodeAttributeDeclaration[] { new CodeAttributeDeclaration(), new CodeAttributeDeclaration(string.Empty), new CodeAttributeDeclaration("name"), new CodeAttributeDeclaration("name", new CodeAttributeArgument(new CodePrimitiveExpression(1))), new CodeAttributeDeclaration("name", new CodeAttributeArgument("AttributeName", new CodePrimitiveExpression(1))), new CodeAttributeDeclaration("name", new CodeAttributeArgument("AttributeName1", new CodePrimitiveExpression(1)), new CodeAttributeArgument("AttributeName2", new CodePrimitiveExpression(2))) }); int generateAttributeDeclarationsStartCallCount = 0; int generateAttributeDeclarationsEndCallCount = 0; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.GenerateAttributeDeclarationsStartAction = (actualAttributes) => { Assert.Same(attributes, actualAttributes); Assert.Equal(0, generateAttributeDeclarationsEndCallCount); generator.Output.Write("StartAttributes "); generateAttributeDeclarationsStartCallCount++; }; generator.OutputIdentifierAction = (actualIdentifier, baseMethod) => baseMethod(actualIdentifier); generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GeneratePrimitiveExpressionAction = (actualE, baseMethod) => baseMethod(actualE); generator.GenerateAttributeDeclarationsEndAction = (actualAttributes) => { Assert.Same(attributes, actualAttributes); generator.Output.Write(" EndAttributes"); generateAttributeDeclarationsEndCallCount++; }; generator.OutputAttributeDeclarations(attributes); Assert.Equal(1, generateAttributeDeclarationsStartCallCount); Assert.Equal(1, generateAttributeDeclarationsEndCallCount); Assert.Equal($"StartAttributes (), {Environment.NewLine}(), {Environment.NewLine}name(), {Environment.NewLine}name(1), {Environment.NewLine}name(AttributeName=1), {Environment.NewLine}name(AttributeName1=1, AttributeName2=2) EndAttributes", writer.ToString()); }); } [Fact] public void OutputAttributeDeclarations_InvokeEmptyAttributes_Nop() { CodeGeneratorTests generator = this; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputAttributeDeclarations(new CodeAttributeDeclarationCollection()); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void OutputAttributeDeclarations_NullAttributes_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<ArgumentNullException>("attributes", () => generator.OutputAttributeDeclarations(null)); } [Fact] public void
OutputAttributeDeclarations_InvokeNonEmptyAttributesNoOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var attributes = new CodeAttributeDeclarationCollection(new CodeAttributeDeclaration[] { new CodeAttributeDeclaration(), new CodeAttributeDeclaration(string.Empty), new CodeAttributeDeclaration("name") }); generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.GenerateAttributeDeclarationsStartAction = (actualAttributes) => {}; generator.GenerateAttributeDeclarationsEndAction = (actualAttributes) => {}; Assert.Throws<NullReferenceException>(() => generator.OutputAttributeDeclarations(attributes)); } [Fact] public void OutputAttributeDeclarations_NullArgumentExpressionInAttributes_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var attributes = new CodeAttributeDeclarationCollection(new CodeAttributeDeclaration[] { new CodeAttributeDeclaration("name", new CodeAttributeArgument()) }); generator.OutputAttributeDeclarationsAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.GenerateAttributeDeclarationsStartAction = (actualAttributes) => { }; generator.OutputAttributeArgumentAction = (actualArg, baseMethod) => baseMethod(actualArg); generator.GenerateAttributeDeclarationsEndAction = (actualAttributes) => { }; Assert.Throws<ArgumentNullException>("e", () => generator.OutputAttributeDeclarations(attributes)); }); } [Theory] [InlineData(FieldDirection.In, "")] [InlineData(FieldDirection.Out, "out ")] [InlineData(FieldDirection.Ref, "ref ")] [InlineData(FieldDirection.In - 1, "")] [InlineData(FieldDirection.Ref + 1, "")] public void OutputDirection_Invoke_Success(FieldDirection direction, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputDirection(direction); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(FieldDirection.Out)] [InlineData(FieldDirection.Ref)] public void OutputDirection_InvokeWithoutOutput_ThrowsNullReferenceException(FieldDirection direction) { CodeGeneratorTests generator = this; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); Assert.Throws<NullReferenceException>(() => generator.OutputDirection(direction)); } [Theory] [InlineData(FieldDirection.In)] [InlineData(FieldDirection.In - 1)] [InlineData(FieldDirection.Ref + 1)] public void OutputDirection_InvokeWithoutOutputInvalidDirection_Nop(FieldDirection direction) { CodeGeneratorTests generator = this; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputDirection(direction); } [Theory] [InlineData(MemberAttributes.Abstract, "")] [InlineData(MemberAttributes.Final, "")] [InlineData(MemberAttributes.Static, "static ")] [InlineData(MemberAttributes.Override, "")] [InlineData(MemberAttributes.Const, "const ")] [InlineData(MemberAttributes.ScopeMask, "")] [InlineData(MemberAttributes.New, "new ")] [InlineData(MemberAttributes.VTableMask, "")] [InlineData(MemberAttributes.Overloaded, "")] [InlineData(MemberAttributes.Assembly, "")] [InlineData(MemberAttributes.FamilyAndAssembly, "")] [InlineData(MemberAttributes.Family, "")] [InlineData(MemberAttributes.FamilyOrAssembly, "")] [InlineData(MemberAttributes.Private, "")] [InlineData(MemberAttributes.Public, "")]
[InlineData(MemberAttributes.AccessMask, "")] [InlineData(MemberAttributes.New | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.Static | MemberAttributes.Private, "static ")] [InlineData(MemberAttributes.Const | MemberAttributes.Private, "const ")] [InlineData(MemberAttributes.New | MemberAttributes.Static, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Const, "new const ")] public void OutputFieldScopeModifier_Invoke_Success(MemberAttributes attributes, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputFieldScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputFieldScopeModifier(attributes); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(MemberAttributes.Static)] [InlineData(MemberAttributes.Const)] [InlineData(MemberAttributes.New)] [InlineData(MemberAttributes.New | MemberAttributes.Private)] [InlineData(MemberAttributes.Static | MemberAttributes.Private)] [InlineData(MemberAttributes.Const | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Static)] [InlineData(MemberAttributes.New | MemberAttributes.Const)] public void OutputFieldScopeModifier_InvokeWithoutOutput_ThrowsNullReferenceException(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputFieldScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<NullReferenceException>(() => generator.OutputFieldScopeModifier(attributes)); } [Theory] [InlineData(MemberAttributes.Abstract)] [InlineData(MemberAttributes.Final)] [InlineData(MemberAttributes.Override)] [InlineData(MemberAttributes.ScopeMask)] [InlineData(MemberAttributes.VTableMask)] [InlineData(MemberAttributes.Overloaded)] [InlineData(MemberAttributes.Assembly)] [InlineData(MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.Family)] [InlineData(MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.Private)] [InlineData(MemberAttributes.Public)] [InlineData(MemberAttributes.AccessMask)] public void OutputFieldScopeModifier_InvokeWithoutOutputInvalid_Nop(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputFieldScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputFieldScopeModifier(attributes); } [Theory] [InlineData(null, "")] [InlineData("", "")] [InlineData("ident", "ident")] public void OutputIdentifier_InvokeWithOutput_Appends(string st, string expected) { CodeGeneratorTests generator = this; generator.PerformActionWithOutput(writer => { generator.OutputIdentifierAction = (actualSt, baseMethod) => baseMethod(actualSt); generator.OutputIdentifier(st); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("ident")] public void OutputIdentifier_InvokeWithoutOutput_ThrowsNullReferenceException(string ident) { CodeGeneratorTests generator = this; generator.OutputIdentifierAction = (actualSt, baseMethod) => baseMethod(actualSt); Assert.Throws<NullReferenceException>(() => generator.OutputIdentifier(ident)); } [Theory] [InlineData(MemberAttributes.Abstract, "")] [InlineData(MemberAttributes.Final, "")] [InlineData(MemberAttributes.Static, "")] [InlineData(MemberAttributes.Override, "")] [InlineData(MemberAttributes.Const, "")] [InlineData(MemberAttributes.ScopeMask, "")] [InlineData(MemberAttributes.New, "")] [InlineData(MemberAttributes.VTableMask, "")] 
[InlineData(MemberAttributes.Overloaded, "")] [InlineData(MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.AccessMask, "")] [InlineData(MemberAttributes.New | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly, "internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family, "protected ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly, "protected internal ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | 
MemberAttributes.Private, "private ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public, "public ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public, "public ")] public void OutputMemberAccessModifier_Invoke_Success(MemberAttributes attributes, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputMemberAccessModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberAccessModifier(attributes); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(MemberAttributes.Assembly)] [InlineData(MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.Family)] [InlineData(MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.Private)] [InlineData(MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private)] [InlineData(MemberAttributes.New | 
MemberAttributes.Final | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public)] public void OutputMemberAccessModifier_InvokeWithoutOutput_ThrowsNullReferenceException(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberAccessModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<NullReferenceException>(() => generator.OutputMemberAccessModifier(attributes)); } [Theory] [InlineData(MemberAttributes.Abstract)] [InlineData(MemberAttributes.Final)] [InlineData(MemberAttributes.Static)] [InlineData(MemberAttributes.Override)] [InlineData(MemberAttributes.Const)] [InlineData(MemberAttributes.ScopeMask)] [InlineData(MemberAttributes.New)] [InlineData(MemberAttributes.VTableMask)] [InlineData(MemberAttributes.Overloaded)] [InlineData(MemberAttributes.AccessMask)] public void OutputMemberAccessModifier_InvokeWithoutOutputInvalid_Nop(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberAccessModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberAccessModifier(attributes); } [Theory] [InlineData(MemberAttributes.Abstract, "abstract ")] [InlineData(MemberAttributes.Final, "")] [InlineData(MemberAttributes.Static, "static ")] [InlineData(MemberAttributes.Override, "override ")] [InlineData(MemberAttributes.Const, "")] [InlineData(MemberAttributes.ScopeMask, "")] [InlineData(MemberAttributes.New, "new ")] [InlineData(MemberAttributes.VTableMask, "")] [InlineData(MemberAttributes.Overloaded, "")] [InlineData(MemberAttributes.Assembly, "")] [InlineData(MemberAttributes.FamilyAndAssembly, "")] [InlineData(MemberAttributes.Family, "virtual ")] [InlineData(MemberAttributes.FamilyOrAssembly, "")] [InlineData(MemberAttributes.Private, "")] [InlineData(MemberAttributes.Public, "virtual ")] [InlineData(MemberAttributes.AccessMask, "")] [InlineData(MemberAttributes.New | MemberAttributes.Assembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Family, "new virtual ")] [InlineData(MemberAttributes.New | MemberAttributes.FamilyOrAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Public, "new virtual ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly, "new ")] 
[InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family, "new virtual ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Private, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public, "new abstract ")] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public, "new ")] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public, "new static ")] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public, "new override ")] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public, "new virtual ")] public void OutputMemberScopeModifier_Invoke_Success(MemberAttributes attributes, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputMemberScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberScopeModifier(attributes); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(MemberAttributes.Abstract)] [InlineData(MemberAttributes.Final)] [InlineData(MemberAttributes.Static)] [InlineData(MemberAttributes.Override)] [InlineData(MemberAttributes.New)] [InlineData(MemberAttributes.Family)] [InlineData(MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Family)] [InlineData(MemberAttributes.New | 
MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Assembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyAndAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Family)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Private)] [InlineData(MemberAttributes.New | MemberAttributes.Abstract | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Final | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Static | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Override | MemberAttributes.Public)] [InlineData(MemberAttributes.New | MemberAttributes.Const | MemberAttributes.Public)] public void OutputMemberScopeModifier_InvokeWithoutOutput_ThrowsNullReferenceException(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); Assert.Throws<NullReferenceException>(() => generator.OutputMemberScopeModifier(attributes)); } [Theory] [InlineData(MemberAttributes.Const)] [InlineData(MemberAttributes.ScopeMask)] [InlineData(MemberAttributes.VTableMask)] [InlineData(MemberAttributes.Overloaded)] [InlineData(MemberAttributes.Assembly)] [InlineData(MemberAttributes.FamilyAndAssembly)] 
[InlineData(MemberAttributes.FamilyOrAssembly)] [InlineData(MemberAttributes.Private)] [InlineData(MemberAttributes.AccessMask)] public void OutputMemberScopeModifier_InvokeWithoutOutputInvalid_Nop(MemberAttributes attributes) { CodeGeneratorTests generator = this; generator.OutputMemberScopeModifierAction = (actualAttributes, baseMethod) => baseMethod(actualAttributes); generator.OutputMemberScopeModifier(attributes); } [Theory] [InlineData(CodeBinaryOperatorType.Add, "+")] [InlineData(CodeBinaryOperatorType.Assign, "=")] [InlineData(CodeBinaryOperatorType.BitwiseAnd, "&")] [InlineData(CodeBinaryOperatorType.BitwiseOr, "|")] [InlineData(CodeBinaryOperatorType.BooleanAnd, "&&")] [InlineData(CodeBinaryOperatorType.BooleanOr, "||")] [InlineData(CodeBinaryOperatorType.Divide, "/")] [InlineData(CodeBinaryOperatorType.GreaterThan, ">")] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual, ">=")] [InlineData(CodeBinaryOperatorType.IdentityEquality, "==")] [InlineData(CodeBinaryOperatorType.IdentityInequality, "!=")] [InlineData(CodeBinaryOperatorType.LessThan, "<")] [InlineData(CodeBinaryOperatorType.LessThanOrEqual, "<=")] [InlineData(CodeBinaryOperatorType.Modulus, "%")] [InlineData(CodeBinaryOperatorType.Multiply, "*")] [InlineData(CodeBinaryOperatorType.Subtract, "-")] [InlineData(CodeBinaryOperatorType.ValueEquality, "==")] [InlineData(CodeBinaryOperatorType.Add - 1, "")] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual + 1, "")] public void OutputOperator_Invoke_Success(CodeBinaryOperatorType op, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.OutputOperator(op); Assert.Equal(expected, writer.ToString()); }); } [Theory] [InlineData(CodeBinaryOperatorType.Add)] [InlineData(CodeBinaryOperatorType.Assign)] [InlineData(CodeBinaryOperatorType.BitwiseAnd)] [InlineData(CodeBinaryOperatorType.BitwiseOr)] [InlineData(CodeBinaryOperatorType.BooleanAnd)] [InlineData(CodeBinaryOperatorType.BooleanOr)] [InlineData(CodeBinaryOperatorType.Divide)] [InlineData(CodeBinaryOperatorType.GreaterThan)] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual)] [InlineData(CodeBinaryOperatorType.IdentityEquality)] [InlineData(CodeBinaryOperatorType.IdentityInequality)] [InlineData(CodeBinaryOperatorType.LessThan)] [InlineData(CodeBinaryOperatorType.LessThanOrEqual)] [InlineData(CodeBinaryOperatorType.Modulus)] [InlineData(CodeBinaryOperatorType.Multiply)] [InlineData(CodeBinaryOperatorType.Subtract)] [InlineData(CodeBinaryOperatorType.ValueEquality)] public void OutputOperator_InvokeWithoutOutput_ThrowsNullReferenceException(CodeBinaryOperatorType op) { CodeGeneratorTests generator = this; generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); Assert.Throws<NullReferenceException>(() => generator.OutputOperator(op)); } [Theory] [InlineData(CodeBinaryOperatorType.Add - 1)] [InlineData(CodeBinaryOperatorType.GreaterThanOrEqual + 1)] public void OutputOperator_InvokeWithoutOutputInvalidOp_Nop(CodeBinaryOperatorType op) { CodeGeneratorTests generator = this; generator.OutputOperatorAction = (actualOp, baseMethod) => baseMethod(actualOp); generator.OutputOperator(op); } public static IEnumerable<object[]> OutputParameter_TestData() { yield return new object[] { new CodeParameterDeclarationExpression[0], "" }; yield return new object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression() }, "Type " }; yield return new 
object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression("type1", "name1"), new CodeParameterDeclarationExpression("type2", "name2") }, "Type name1, Type name2" }; yield return new object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression() }, "Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type , Type " }; yield return new object[] { new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression(), new CodeParameterDeclarationExpression() }, $"{Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type , {Environment.NewLine}Type " }; } [Theory] [MemberData(nameof(OutputParameter_TestData))] public void OutputParameter_Invoke_Success(CodeParameterDeclarationExpression[] parametersArray, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { var parameters = new CodeParameterDeclarationExpressionCollection(parametersArray); generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); int generateParameterDeclarationExpressionCallCount = 0; int outputTypeActionCallCount = 0; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(parameters[generateParameterDeclarationExpressionCallCount], actualE); generateParameterDeclarationExpressionCallCount++; }; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(parameters[generateParameterDeclarationExpressionCallCount].Type, actualTypeRef); writer.Write("Type"); outputTypeActionCallCount++; }; generator.OutputIdentifierAction = (actualTypeRef, baseMethod) => baseMethod(actualTypeRef); generator.OutputParameters(parameters); Assert.Equal(expected, 
writer.ToString()); Assert.Equal(parameters.Count, generateParameterDeclarationExpressionCallCount); Assert.Equal(parameters.Count, outputTypeActionCallCount); // Call again to make sure indent is reset. Assert.Equal(expected, writer.ToString()); Assert.Equal(parameters.Count, generateParameterDeclarationExpressionCallCount); Assert.Equal(parameters.Count, outputTypeActionCallCount); }); } [Fact] public void OutputParameters_EmptyParametersWithoutWriter_Nop() { CodeGeneratorTests generator = this; var parameters = new CodeParameterDeclarationExpressionCollection(); generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); generator.OutputParameters(parameters); } [Fact] public void OutputParameters_InvokeWithoutWriter_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var parameters = new CodeParameterDeclarationExpressionCollection(new CodeParameterDeclarationExpression[] { new CodeParameterDeclarationExpression() }); generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); int generateParameterDeclarationExpressionCallCount = 0; generator.GenerateParameterDeclarationExpressionAction = (actualE, baseMethod) => { baseMethod(actualE); Assert.Same(parameters[generateParameterDeclarationExpressionCallCount], actualE); generateParameterDeclarationExpressionCallCount++; }; generator.OutputDirectionAction = (actualDirection, baseMethod) => baseMethod(actualDirection); generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); generator.OutputTypeAction = (actualTypeRef) => { }; Assert.Throws<NullReferenceException>(() => generator.OutputParameters(parameters)); } [Fact] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void OutputParameters_NullParameters_ThrowsArgumentNullException() { CodeGeneratorTests generator = this; generator.OutputParametersAction = (actualParameters, baseMethod) => baseMethod(actualParameters); Assert.Throws<ArgumentNullException>("parameters", () => generator.OutputParameters(null)); } public static IEnumerable<object[]> OutputTypeNamePair_TestData() { yield return new object[] { null, null }; yield return new object[] { new CodeTypeReference(), "" }; yield return new object[] { new CodeTypeReference(), "name" }; } [Theory] [MemberData(nameof(OutputTypeNamePair_TestData))] public void OutputTypeNamePair_Invoke_Success(CodeTypeReference typeRef, string name) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); int outputTypeCallCount = 0; int outputIdentifierCallCount = 0; generator.OutputTypeAction = (actualTypeRef) => { Assert.Same(typeRef, actualTypeRef); Assert.Equal(0, outputIdentifierCallCount); writer.Write("Type"); outputTypeCallCount++; }; generator.OutputIdentifierAction = (actualIdent, baseMethod) => { baseMethod(actualIdent); outputIdentifierCallCount++; }; generator.OutputTypeNamePair(typeRef, name); Assert.Equal($"Type {name}", writer.ToString()); Assert.Equal(1, outputTypeCallCount); Assert.Equal(1, outputIdentifierCallCount); }); } [Fact] public void OutputTypeNamePair_InvokeWithoutOutput_ThrowsNullReferenceException() { CodeGeneratorTests generator = this; var typeRef = new CodeTypeReference(); generator.OutputTypeNamePairAction = (actualTypeRef, actualName, baseMethod) => baseMethod(actualTypeRef, actualName); 
generator.OutputTypeAction = (actualTypeRef) => {}; Assert.Throws<NullReferenceException>(() => generator.OutputTypeNamePair(typeRef, "name")); } [Theory] [InlineData(TypeAttributes.NotPublic, false, false, "class ")] [InlineData(TypeAttributes.NotPublic, true, false, "struct ")] [InlineData(TypeAttributes.NotPublic, true, true, "struct ")] [InlineData(TypeAttributes.NotPublic, false, true, "enum ")] [InlineData(TypeAttributes.Public, false, false, "public class ")] [InlineData(TypeAttributes.Public, true, false, "public struct ")] [InlineData(TypeAttributes.Public, true, true, "public struct ")] [InlineData(TypeAttributes.Public, false, true, "public enum ")] [InlineData(TypeAttributes.NestedPublic, false, false, "public class ")] [InlineData(TypeAttributes.NestedPublic, true, false, "public struct ")] [InlineData(TypeAttributes.NestedPublic, true, true, "public struct ")] [InlineData(TypeAttributes.NestedPublic, false, true, "public enum ")] [InlineData(TypeAttributes.NestedPrivate, false, false, "private class ")] [InlineData(TypeAttributes.NestedPrivate, true, false, "private struct ")] [InlineData(TypeAttributes.NestedPrivate, true, true, "private struct ")] [InlineData(TypeAttributes.NestedPrivate, false, true, "private enum ")] [InlineData(TypeAttributes.NestedFamily, false, false, "class ")] [InlineData(TypeAttributes.NestedAssembly, false, false, "class ")] [InlineData(TypeAttributes.NestedFamANDAssem, false, false, "class ")] [InlineData(TypeAttributes.NestedFamORAssem, false, false, "class ")] [InlineData(TypeAttributes.SequentialLayout, false, false, "class ")] [InlineData(TypeAttributes.ExplicitLayout, false, false, "class ")] [InlineData(TypeAttributes.LayoutMask, false, false, "class ")] [InlineData(TypeAttributes.Interface, false, false, "interface ")] [InlineData(TypeAttributes.Abstract, false, false, "abstract class ")] [InlineData(TypeAttributes.Abstract, true, false, "struct ")] [InlineData(TypeAttributes.Abstract, true, true, "struct ")] [InlineData(TypeAttributes.Abstract, false, true, "enum ")] [InlineData(TypeAttributes.Sealed, false, false, "sealed class ")] [InlineData(TypeAttributes.Sealed, true, false, "struct ")] [InlineData(TypeAttributes.Sealed, true, true, "struct ")] [InlineData(TypeAttributes.Sealed, false, true, "enum ")] [InlineData(TypeAttributes.SpecialName, false, false, "class ")] [InlineData(TypeAttributes.RTSpecialName, false, false, "class ")] [InlineData(TypeAttributes.Import, false, false, "class ")] [InlineData(TypeAttributes.Serializable, false, false, "class ")] [InlineData(TypeAttributes.WindowsRuntime, false, false, "class ")] [InlineData(TypeAttributes.UnicodeClass, false, false, "class ")] [InlineData(TypeAttributes.AutoClass, false, false, "class ")] [InlineData(TypeAttributes.CustomFormatClass, false, false, "class ")] [InlineData(TypeAttributes.HasSecurity, false, false, "class ")] [InlineData(TypeAttributes.ReservedMask, false, false, "class ")] [InlineData(TypeAttributes.BeforeFieldInit, false, false, "class ")] [InlineData(TypeAttributes.CustomFormatMask, false, false, "class ")] public void OutputTypeAttributes_Invoke_Success(TypeAttributes attributes, bool isStruct, bool isEnum, string expected) { CodeGeneratorTests generator = this; PerformActionWithOutput(writer => { generator.OutputTypeAttributesAction = (actualAttributes, isStruct, isEnum, baseMethod) => baseMethod(actualAttributes, isStruct, isEnum); generator.OutputTypeAttributes(attributes, isStruct, isEnum); Assert.Equal(expected, writer.ToString()); }); } [Theory] 
[InlineData(TypeAttributes.NotPublic, false, false)] [InlineData(TypeAttributes.NotPublic, true, false)] [InlineData(TypeAttributes.NotPublic, true, true)] [InlineData(TypeAttributes.NotPublic, false, true)] [InlineData(TypeAttributes.Public, false, false)] [InlineData(TypeAttributes.Public, true, false)] [InlineData(TypeAttributes.Public, true, true)] [InlineData(TypeAttributes.Public, false, true)] [InlineData(TypeAttributes.NestedPublic, false, false)] [InlineData(TypeAttributes.NestedPublic, true, false)] [InlineData(TypeAttributes.NestedPublic, true, true)] [InlineData(TypeAttributes.NestedPublic, false, true)] [InlineData(TypeAttributes.NestedPrivate, false, false)] [InlineData(TypeAttributes.NestedPrivate, true, false)] [InlineData(TypeAttributes.NestedPrivate, true, true)] [InlineData(TypeAttributes.NestedPrivate, false, true)] [InlineData(TypeAttributes.NestedFamily, false, false)] [InlineData(TypeAttributes.NestedAssembly, false, false)] [InlineData(TypeAttributes.NestedFamANDAssem, false, false)] [InlineData(TypeAttributes.NestedFamORAssem, false, false)] [InlineData(TypeAttributes.SequentialLayout, false, false)] [InlineData(TypeAttributes.ExplicitLayout, false, false)] [InlineData(TypeAttributes.LayoutMask, false, false)] [InlineData(TypeAttributes.Interface, false, false)] [InlineData(TypeAttributes.Abstract, false, false)] [InlineData(TypeAttributes.Abstract, true, false)] [InlineData(TypeAttributes.Abstract, true, true)] [InlineData(TypeAttributes.Abstract, false, true)] [InlineData(TypeAttributes.Sealed, false, false)] [InlineData(TypeAttributes.Sealed, true, false)] [InlineData(TypeAttributes.Sealed, true, true)] [InlineData(TypeAttributes.Sealed, false, true)] [InlineData(TypeAttributes.SpecialName, false, false)] [InlineData(TypeAttributes.RTSpecialName, false, false)] [InlineData(TypeAttributes.Import, false, false)] [InlineData(TypeAttributes.Serializable, false, false)] [InlineData(TypeAttributes.WindowsRuntime, false, false)] [InlineData(TypeAttributes.UnicodeClass, false, false)] [InlineData(TypeAttributes.AutoClass, false, false)] [InlineData(TypeAttributes.CustomFormatClass, false, false)] [InlineData(TypeAttributes.HasSecurity, false, false)] [InlineData(TypeAttributes.ReservedMask, false, false)] [InlineData(TypeAttributes.BeforeFieldInit, false, false)] [InlineData(TypeAttributes.CustomFormatMask, false, false)] public void OutputTypeAttributes_InvokeWithoutWriter_ThrowsNullReferenceException(TypeAttributes attributes, bool isStruct, bool isEnum) { CodeGeneratorTests generator = this; generator.OutputTypeAttributesAction = (actualAttributes, isStruct, isEnum, baseMethod) => baseMethod(actualAttributes, isStruct, isEnum); Assert.Throws<NullReferenceException>(() => generator.OutputTypeAttributes(attributes, isStruct, isEnum)); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] public void ValidateIdentifier_InvokeValid_Nop(string value) { CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return true; }; generator.ValidateIdentifier(value); Assert.Equal(1, isValidIdentifierCallCount); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void ValidateIdentifier_InvokeInvalid_ThrowsArgumentException(string value) 
{ CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return false; }; Assert.Throws<ArgumentException>("value", () => generator.ValidateIdentifier(value)); Assert.Equal(1, isValidIdentifierCallCount); } [Theory] [InlineData(null, null)] [InlineData(null, "")] [InlineData(null, "identifier")] [InlineData("", null)] [InlineData("", "")] [InlineData("", "identifier")] [InlineData("identifier", null)] [InlineData("identifier", "")] [InlineData("identifier", "escapedIdentifier")] public void ICodeGeneratorCreateEscapedIdentifier_Invoke_ReturnsExpected(string value, string result) { CodeGeneratorTests generator = this; int callCount = 0; generator.CreateEscapedIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.CreateEscapedIdentifier(value)); Assert.Equal(1, callCount); } [Theory] [InlineData(null, null)] [InlineData(null, "")] [InlineData(null, "identifier")] [InlineData("", null)] [InlineData("", "")] [InlineData("", "identifier")] [InlineData("identifier", null)] [InlineData("identifier", "")] [InlineData("identifier", "validIdentifier")] public void ICodeGeneratorCreateValidIdentifier_Invoke_ReturnsExpected(string value, string result) { CodeGeneratorTests generator = this; int callCount = 0; generator.CreateValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.CreateValidIdentifier(value)); Assert.Equal(1, callCount); } public static IEnumerable<object[]> GetTypeOutput_TestData() { yield return new object[] { null, null }; yield return new object[] { null, string.Empty }; yield return new object[] { null, "Output" }; yield return new object[] { new CodeTypeReference(), null }; yield return new object[] { new CodeTypeReference(), string.Empty }; yield return new object[] { new CodeTypeReference(), "Output" }; } [Theory] [MemberData(nameof(GetTypeOutput_TestData))] public void ICodeGeneratorGetTypeOutput_Invoke_ReturnsExpected(CodeTypeReference value, string result) { CodeGeneratorTests generator = this; int callCount = 0; generator.GetTypeOutputAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.GetTypeOutput(value)); Assert.Equal(1, callCount); } public static IEnumerable<object[]> IsValidIdentifier_TestData() { foreach (bool result in new bool[] { true, false }) { yield return new object[] { null, result }; yield return new object[] { "", result }; yield return new object[] { "value", result }; } } [Theory] [MemberData(nameof(IsValidIdentifier_TestData))] public void ICodeGeneratorIsValidIdentifier_Invoke_ReturnsExpected(string value, bool result) { CodeGeneratorTests generator = this; int callCount = 0; generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.IsValidIdentifier(value)); Assert.Equal(1, callCount); } public static IEnumerable<object[]> Supports_TestData() { foreach (bool result in new bool[] { true, false }) { yield return new object[] 
{ GeneratorSupport.ArraysOfArrays - 1, result }; yield return new object[] { GeneratorSupport.AssemblyAttributes, result }; } } [Theory] [MemberData(nameof(Supports_TestData))] public void ICodeGeneratorSupports_Invoke_ReturnsExpected(GeneratorSupport support, bool result) { CodeGeneratorTests generator = this; int callCount = 0; generator.SupportsAction = (actualSupport) => { Assert.Equal(support, actualSupport); callCount++; return result; }; ICodeGenerator iCodeGenerator = generator; Assert.Equal(result, iCodeGenerator.Supports(support)); Assert.Equal(1, callCount); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] public void ICodeGeneratorValidateIdentifier_InvokeValid_Nop(string value) { CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return true; }; ICodeGenerator iCodeGenerator = generator; iCodeGenerator.ValidateIdentifier(value); Assert.Equal(1, isValidIdentifierCallCount); } [Theory] [InlineData(null)] [InlineData("")] [InlineData("value")] [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] public void ICodeGeneratorValidateIdentifier_InvokeInvalid_ThrowsArgumentException(string value) { CodeGeneratorTests generator = this; int isValidIdentifierCallCount = 0; generator.ValidateIdentifierAction = (actualValue, baseMethod) => baseMethod(actualValue); generator.IsValidIdentifierAction = (actualValue) => { Assert.Same(value, actualValue); isValidIdentifierCallCount++; return false; }; ICodeGenerator iCodeGenerator = generator; Assert.Throws<ArgumentException>("value", () => iCodeGenerator.ValidateIdentifier(value)); Assert.Equal(1, isValidIdentifierCallCount); } private void PerformActionWithOutput(Action<StringWriter> action, CodeGeneratorOptions options = null) { CodeGeneratorTests generator = this; ICodeGenerator iCodeGenerator = generator; var e = new CodeArrayCreateExpression(typeof(int)); var writer = new StringWriter(); int callCount = 0; generator.GenerateArrayCreateExpressionAction = (actualE) => { Assert.Same(e, actualE); Assert.Equal(0, generator.Indent); Assert.NotNull(generator.Output); if (options != null) { Assert.Same(options, generator.Options); } else { Assert.NotNull(generator.Options); } action(writer); callCount++; }; iCodeGenerator.GenerateCodeFromExpression(e, writer, options); Assert.Equal(1, callCount); } protected override string NullToken => "NullToken"; public Func<string, string> CreateEscapedIdentifierAction { get; set; } protected override string CreateEscapedIdentifier(string value) { return CreateEscapedIdentifierAction(value); } public Func<string, string> CreateValidIdentifierAction { get; set; } protected override string CreateValidIdentifier(string value) { return CreateValidIdentifierAction(value); } public Action<CodeArgumentReferenceExpression> GenerateArgumentReferenceExpressionAction { get; set; } protected override void GenerateArgumentReferenceExpression(CodeArgumentReferenceExpression e) { GenerateArgumentReferenceExpressionAction(e); } public Action<CodeArrayCreateExpression> GenerateArrayCreateExpressionAction { get; set; } protected override void GenerateArrayCreateExpression(CodeArrayCreateExpression e) { GenerateArrayCreateExpressionAction(e); } public Action<CodeArrayIndexerExpression> GenerateArrayIndexerExpressionAction { get; set; } protected 
override void GenerateArrayIndexerExpression(CodeArrayIndexerExpression e) { GenerateArrayIndexerExpressionAction(e); } public Action<CodeAssignStatement> GenerateAssignStatementAction { get; set; } protected override void GenerateAssignStatement(CodeAssignStatement e) { GenerateAssignStatementAction(e); } public Action<CodeAttachEventStatement> GenerateAttachEventStatementAction { get; set; } protected override void GenerateAttachEventStatement(CodeAttachEventStatement e) { GenerateAttachEventStatementAction(e); } public Action<CodeAttributeDeclarationCollection> GenerateAttributeDeclarationsEndAction { get; set; } protected override void GenerateAttributeDeclarationsEnd(CodeAttributeDeclarationCollection attributes) { GenerateAttributeDeclarationsEndAction(attributes); } public Action<CodeAttributeDeclarationCollection> GenerateAttributeDeclarationsStartAction { get; set; } protected override void GenerateAttributeDeclarationsStart(CodeAttributeDeclarationCollection attributes) { GenerateAttributeDeclarationsStartAction(attributes); } public Action<CodeBaseReferenceExpression> GenerateBaseReferenceExpressionAction { get; set; } protected override void GenerateBaseReferenceExpression(CodeBaseReferenceExpression e) { GenerateBaseReferenceExpressionAction(e); } public Action<CodeCastExpression> GenerateCastExpressionAction { get; set; } protected override void GenerateCastExpression(CodeCastExpression e) { GenerateCastExpressionAction(e); } public Action<CodeComment> GenerateCommentAction { get; set; } protected override void GenerateComment(CodeComment e) { GenerateCommentAction(e); } public Action<CodeCommentStatementCollection, Action<CodeCommentStatementCollection>> GenerateCommentStatementsAction { get; set; } protected override void GenerateCommentStatements(CodeCommentStatementCollection e) { if (e != null) { if (e.GetEnumerator().MoveNext()) { GenerateCommentStatementsAction(e, base.GenerateCommentStatements); } } else { GenerateCommentStatementsAction(e, base.GenerateCommentStatements); } } public Action<CodeCompileUnit, Action<CodeCompileUnit>> GenerateCompileUnitAction { get; set; } protected override void GenerateCompileUnit(CodeCompileUnit e) { GenerateCompileUnitAction(e, base.GenerateCompileUnit); } public Action<CodeCompileUnit, Action<CodeCompileUnit>> GenerateCompileUnitEndAction { get; set; } protected override void GenerateCompileUnitEnd(CodeCompileUnit e) { GenerateCompileUnitEndAction(e, base.GenerateCompileUnitEnd); } public Action<CodeCompileUnit, Action<CodeCompileUnit>> GenerateCompileUnitStartAction { get; set; } protected override void GenerateCompileUnitStart(CodeCompileUnit e) { GenerateCompileUnitStartAction(e, base.GenerateCompileUnitStart); } public Action<CodeConditionStatement> GenerateConditionStatementAction { get; set; } protected override void GenerateConditionStatement(CodeConditionStatement e) { GenerateConditionStatementAction(e); } public Action<CodeConstructor, CodeTypeDeclaration> GenerateConstructorAction { get; set; } protected override void GenerateConstructor(CodeConstructor e, CodeTypeDeclaration c) { GenerateConstructorAction(e, c); } public Action<decimal, Action<decimal>> GenerateDecimalValueAction { get; set; } protected override void GenerateDecimalValue(decimal d) { GenerateDecimalValueAction(d, base.GenerateDecimalValue); } public Action<CodeDefaultValueExpression, Action<CodeDefaultValueExpression>> GenerateDefaultValueExpressionAction { get; set; } protected override void GenerateDefaultValueExpression(CodeDefaultValueExpression e) 
{ GenerateDefaultValueExpressionAction(e, base.GenerateDefaultValueExpression); } public Action<CodeDelegateCreateExpression> GenerateDelegateCreateExpressionAction { get; set; } protected override void GenerateDelegateCreateExpression(CodeDelegateCreateExpression e) { GenerateDelegateCreateExpressionAction(e); } public Action<CodeDelegateInvokeExpression> GenerateDelegateInvokeExpressionAction { get; set; } protected override void GenerateDelegateInvokeExpression(CodeDelegateInvokeExpression e) { GenerateDelegateInvokeExpressionAction(e); } public Action<CodeDirectionExpression, Action<CodeDirectionExpression>> GenerateDirectionExpressionAction { get; set; } protected override void GenerateDirectionExpression(CodeDirectionExpression e) { GenerateDirectionExpressionAction(e, base.GenerateDirectionExpression); } public Action<CodeDirectiveCollection, Action<CodeDirectiveCollection>> GenerateDirectivesAction { get; set; } protected override void GenerateDirectives(CodeDirectiveCollection directives) { if (directives != null && directives.GetEnumerator().MoveNext()) { GenerateDirectivesAction(directives, base.GenerateDirectives); } } public Action<double, Action<double>> GenerateDoubleValueAction { get; set; } protected override void GenerateDoubleValue(double d) { GenerateDoubleValueAction(d, base.GenerateDoubleValue); } public Action<CodeEntryPointMethod, CodeTypeDeclaration> GenerateEntryPointMethodAction { get; set; } protected override void GenerateEntryPointMethod(CodeEntryPointMethod e, CodeTypeDeclaration c) { GenerateEntryPointMethodAction(e, c); } public Action<CodeMemberEvent, CodeTypeDeclaration> GenerateEventAction { get; set; } protected override void GenerateEvent(CodeMemberEvent e, CodeTypeDeclaration c) { GenerateEventAction(e, c); } public Action<CodeEventReferenceExpression> GenerateEventReferenceExpressionAction { get; set; } protected override void GenerateEventReferenceExpression(CodeEventReferenceExpression e) { GenerateEventReferenceExpressionAction(e); } public Action<CodeExpressionStatement> GenerateExpressionStatementAction { get; set; } protected override void GenerateExpressionStatement(CodeExpressionStatement e) { GenerateExpressionStatementAction(e); } public Action<CodeMemberField> GenerateFieldAction { get; set; } protected override void GenerateField(CodeMemberField e) { GenerateFieldAction(e); } public Action<CodeFieldReferenceExpression> GenerateFieldReferenceExpressionAction { get; set; } protected override void GenerateFieldReferenceExpression(CodeFieldReferenceExpression e) { GenerateFieldReferenceExpressionAction(e); } public Action<CodeGotoStatement> GenerateGotoStatementAction { get; set; } protected override void GenerateGotoStatement(CodeGotoStatement e) { GenerateGotoStatementAction(e); } public Action<CodeIndexerExpression> GenerateIndexerExpressionAction { get; set; } protected override void GenerateIndexerExpression(CodeIndexerExpression e) { GenerateIndexerExpressionAction(e); } public Action<CodeIterationStatement> GenerateIterationStatementAction { get; set; } protected override void GenerateIterationStatement(CodeIterationStatement e) { GenerateIterationStatementAction(e); } public Action<CodeLabeledStatement> GenerateLabeledStatementAction { get; set; } protected override void GenerateLabeledStatement(CodeLabeledStatement e) { GenerateLabeledStatementAction(e); } public Action<CodeLinePragma> GenerateLinePragmaEndAction { get; set; } protected override void GenerateLinePragmaEnd(CodeLinePragma e) { GenerateLinePragmaEndAction(e); } public 
Action<CodeLinePragma> GenerateLinePragmaStartAction { get; set; } protected override void GenerateLinePragmaStart(CodeLinePragma e) { GenerateLinePragmaStartAction(e); } public Action<CodeMemberMethod, CodeTypeDeclaration> GenerateMethodAction { get; set; } protected override void GenerateMethod(CodeMemberMethod e, CodeTypeDeclaration c) { GenerateMethodAction(e, c); } public Action<CodeMethodInvokeExpression> GenerateMethodInvokeExpressionAction { get; set; } protected override void GenerateMethodInvokeExpression(CodeMethodInvokeExpression e) { GenerateMethodInvokeExpressionAction(e); } public Action<CodeMethodReferenceExpression> GenerateMethodReferenceExpressionAction { get; set; } protected override void GenerateMethodReferenceExpression(CodeMethodReferenceExpression e) { GenerateMethodReferenceExpressionAction(e); } public Action<CodeMethodReturnStatement> GenerateMethodReturnStatementAction { get; set; } protected override void GenerateMethodReturnStatement(CodeMethodReturnStatement e) { GenerateMethodReturnStatementAction(e); } public Action<CodeNamespace, Action<CodeNamespace>> GenerateNamespaceAction { get; set; } protected override void GenerateNamespace(CodeNamespace e) { GenerateNamespaceAction(e, base.GenerateNamespace); } public Action<CodeNamespace> GenerateNamespaceEndAction { get; set; } protected override void GenerateNamespaceEnd(CodeNamespace e) { GenerateNamespaceEndAction(e); } public Action<CodeNamespaceImport> GenerateNamespaceImportAction { get; set; } protected override void GenerateNamespaceImport(CodeNamespaceImport e) { GenerateNamespaceImportAction(e); } public Action<CodeNamespace> GenerateNamespaceStartAction { get; set; } protected override void GenerateNamespaceStart(CodeNamespace e) { GenerateNamespaceStartAction(e); } public Action<CodeObjectCreateExpression> GenerateObjectCreateExpressionAction { get; set; } protected override void GenerateObjectCreateExpression(CodeObjectCreateExpression e) { GenerateObjectCreateExpressionAction(e); } public Action<CodeParameterDeclarationExpression, Action<CodeParameterDeclarationExpression>> GenerateParameterDeclarationExpressionAction { get; set; } protected override void GenerateParameterDeclarationExpression(CodeParameterDeclarationExpression e) { GenerateParameterDeclarationExpressionAction(e, base.GenerateParameterDeclarationExpression); } public Action<CodePrimitiveExpression, Action<CodePrimitiveExpression>> GeneratePrimitiveExpressionAction { get; set; } protected override void GeneratePrimitiveExpression(CodePrimitiveExpression e) { GeneratePrimitiveExpressionAction(e, base.GeneratePrimitiveExpression); } public Action<CodeMemberProperty, CodeTypeDeclaration> GeneratePropertyAction { get; set; } protected override void GenerateProperty(CodeMemberProperty e, CodeTypeDeclaration c) { GeneratePropertyAction(e, c); } public Action<CodePropertyReferenceExpression> GeneratePropertyReferenceExpressionAction { get; set; } protected override void GeneratePropertyReferenceExpression(CodePropertyReferenceExpression e) { GeneratePropertyReferenceExpressionAction(e); } public Action<CodePropertySetValueReferenceExpression> GeneratePropertySetValueReferenceExpressionAction { get; set; } protected override void GeneratePropertySetValueReferenceExpression(CodePropertySetValueReferenceExpression e) { GeneratePropertySetValueReferenceExpressionAction(e); } public Action<CodeRemoveEventStatement> GenerateRemoveEventStatementAction { get; set; } protected override void GenerateRemoveEventStatement(CodeRemoveEventStatement e) { 
GenerateRemoveEventStatementAction(e); } public Action<float, Action<float>> GenerateSingleFloatValueAction { get; set; } protected override void GenerateSingleFloatValue(float s) { GenerateSingleFloatValueAction(s, base.GenerateSingleFloatValue); } public Action<CodeSnippetExpression> GenerateSnippetExpressionAction { get; set; } protected override void GenerateSnippetExpression(CodeSnippetExpression e) { GenerateSnippetExpressionAction(e); } public Action<CodeSnippetTypeMember> GenerateSnippetMemberAction { get; set; } protected override void GenerateSnippetMember(CodeSnippetTypeMember e) { GenerateSnippetMemberAction(e); } public Action<CodeSnippetStatement, Action<CodeSnippetStatement>> GenerateSnippetStatementAction { get; set; } protected override void GenerateSnippetStatement(CodeSnippetStatement e) { GenerateSnippetStatementAction(e, base.GenerateSnippetStatement); } public Action<CodeThisReferenceExpression> GenerateThisReferenceExpressionAction { get; set; } protected override void GenerateThisReferenceExpression(CodeThisReferenceExpression e) { GenerateThisReferenceExpressionAction(e); } public Action<CodeThrowExceptionStatement> GenerateThrowExceptionStatementAction { get; set; } protected override void GenerateThrowExceptionStatement(CodeThrowExceptionStatement e) { GenerateThrowExceptionStatementAction(e); } public Action<CodeTryCatchFinallyStatement> GenerateTryCatchFinallyStatementAction { get; set; } protected override void GenerateTryCatchFinallyStatement(CodeTryCatchFinallyStatement e) { GenerateTryCatchFinallyStatementAction(e); } public Action<CodeTypeConstructor> GenerateTypeConstructorAction { get; set; } protected override void GenerateTypeConstructor(CodeTypeConstructor e) { GenerateTypeConstructorAction(e); } public Action<CodeTypeDeclaration> GenerateTypeEndAction { get; set; } protected override void GenerateTypeEnd(CodeTypeDeclaration e) { GenerateTypeEndAction(e); } public Action<CodeTypeOfExpression, Action<CodeTypeOfExpression>> GenerateTypeOfExpressionAction { get; set; } protected override void GenerateTypeOfExpression(CodeTypeOfExpression e) { GenerateTypeOfExpressionAction(e, base.GenerateTypeOfExpression); } public Action<CodeTypeReferenceExpression, Action<CodeTypeReferenceExpression>> GenerateTypeReferenceExpressionAction { get; set; } protected override void GenerateTypeReferenceExpression(CodeTypeReferenceExpression e) { GenerateTypeReferenceExpressionAction(e, base.GenerateTypeReferenceExpression); } public Action<CodeTypeDeclaration> GenerateTypeStartAction { get; set; } protected override void GenerateTypeStart(CodeTypeDeclaration e) { GenerateTypeStartAction(e); } public Action<CodeVariableDeclarationStatement> GenerateVariableDeclarationStatementAction { get; set; } protected override void GenerateVariableDeclarationStatement(CodeVariableDeclarationStatement e) { GenerateVariableDeclarationStatementAction(e); } public Action<CodeVariableReferenceExpression> GenerateVariableReferenceExpressionAction { get; set; } protected override void GenerateVariableReferenceExpression(CodeVariableReferenceExpression e) { GenerateVariableReferenceExpressionAction(e); } public Func<CodeTypeReference, string> GetTypeOutputAction { get; set; } protected override string GetTypeOutput(CodeTypeReference value) { return GetTypeOutputAction(value); } public Func<string, bool> IsValidIdentifierAction { get; set; } protected override bool IsValidIdentifier(string value) { return IsValidIdentifierAction(value); } public Action<CodeAttributeArgument, 
Action<CodeAttributeArgument>> OutputAttributeArgumentAction { get; set; } protected override void OutputAttributeArgument(CodeAttributeArgument arg) { OutputAttributeArgumentAction(arg, base.OutputAttributeArgument); } public Action<CodeAttributeDeclarationCollection, Action<CodeAttributeDeclarationCollection>> OutputAttributeDeclarationsAction { get; set; } protected override void OutputAttributeDeclarations(CodeAttributeDeclarationCollection attributes) { OutputAttributeDeclarationsAction(attributes, base.OutputAttributeDeclarations); } public Action<FieldDirection, Action<FieldDirection>> OutputDirectionAction { get; set; } protected override void OutputDirection(FieldDirection dir) { OutputDirectionAction(dir, base.OutputDirection); } public Action<MemberAttributes, Action<MemberAttributes>> OutputFieldScopeModifierAction { get; set; } protected override void OutputFieldScopeModifier(MemberAttributes attributes) { OutputFieldScopeModifierAction(attributes, base.OutputFieldScopeModifier); } public Action<string, Action<string>> OutputIdentifierAction { get; set; } protected override void OutputIdentifier(string ident) { OutputIdentifierAction(ident, base.OutputIdentifier); } public Action<MemberAttributes, Action<MemberAttributes>> OutputMemberAccessModifierAction { get; set; } protected override void OutputMemberAccessModifier(MemberAttributes attributes) { OutputMemberAccessModifierAction(attributes, base.OutputMemberAccessModifier); } public Action<MemberAttributes, Action<MemberAttributes>> OutputMemberScopeModifierAction { get; set; } protected override void OutputMemberScopeModifier(MemberAttributes attributes) { OutputMemberScopeModifierAction(attributes, base.OutputMemberScopeModifier); } public Action<CodeBinaryOperatorType, Action<CodeBinaryOperatorType>> OutputOperatorAction { get; set; } protected override void OutputOperator(CodeBinaryOperatorType op) { OutputOperatorAction(op, base.OutputOperator); } public Action<CodeParameterDeclarationExpressionCollection, Action<CodeParameterDeclarationExpressionCollection>> OutputParametersAction { get; set; } protected override void OutputParameters(CodeParameterDeclarationExpressionCollection parameters) { OutputParametersAction(parameters, base.OutputParameters); } public Action<CodeTypeReference> OutputTypeAction { get; set; } protected override void OutputType(CodeTypeReference typeRef) { OutputTypeAction(typeRef); } public Action<TypeAttributes, bool, bool, Action<TypeAttributes, bool, bool>> OutputTypeAttributesAction { get; set; } protected override void OutputTypeAttributes(TypeAttributes attributes, bool isStruct, bool isEnum) { OutputTypeAttributesAction(attributes, isStruct, isEnum, base.OutputTypeAttributes); } public Action<CodeTypeReference, string, Action<CodeTypeReference, string>> OutputTypeNamePairAction { get; set; } protected override void OutputTypeNamePair(CodeTypeReference typeRef, string name) { OutputTypeNamePairAction(typeRef, name, base.OutputTypeNamePair); } public Func<string, string> QuoteSnippetStringAction { get; set; } protected override string QuoteSnippetString(string value) { return QuoteSnippetStringAction(value); } public Func<GeneratorSupport, bool> SupportsAction { get; set; } protected override bool Supports(GeneratorSupport support) { return SupportsAction(support); } public Action<string, Action<string>> ValidateIdentifierAction { get; set; } protected override void ValidateIdentifier(string value) { ValidateIdentifierAction(value, base.ValidateIdentifier); } private class 
CustomCodeExpression : CodeExpression { } private class CustomCodeStatement : CodeStatement { } } }
-1
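The InlineData tables in the record above encode, row by row, the keyword mapping that CodeGenerator.OutputMemberAccessModifier is expected to emit: only the access bits of MemberAttributes (the AccessMask portion) select a keyword, and scope bits such as New, Static, or Const leave it unchanged. The following minimal C# sketch reproduces that mapping from the expected strings in the tests; AccessModifierDemo and MapAccessModifier are illustrative names of mine, not members of System.CodeDom or of the test suite.

using System;
using System.CodeDom;

class AccessModifierDemo
{
    // Mirrors the expected strings in the OutputMemberAccessModifier tests above:
    // mask off everything except the access bits, then map them to a C# keyword
    // followed by a trailing space (or to nothing when no access bits are set).
    static string MapAccessModifier(MemberAttributes attributes)
    {
        switch (attributes & MemberAttributes.AccessMask)
        {
            case MemberAttributes.Assembly: return "internal ";
            case MemberAttributes.FamilyAndAssembly: return "internal "; // C# has no exact famandassem keyword
            case MemberAttributes.Family: return "protected ";
            case MemberAttributes.FamilyOrAssembly: return "protected internal ";
            case MemberAttributes.Private: return "private ";
            case MemberAttributes.Public: return "public ";
            default: return ""; // e.g. Abstract, Static, or AccessMask itself
        }
    }

    static void Main()
    {
        Console.WriteLine(MapAccessModifier(MemberAttributes.New | MemberAttributes.Family));      // "protected "
        Console.WriteLine(MapAccessModifier(MemberAttributes.Override | MemberAttributes.Public)); // "public "
        Console.WriteLine(MapAccessModifier(MemberAttributes.ScopeMask));                          // ""
    }
}

Note that this is only a sketch of the observable mapping; the real CodeGenerator writes to its Output writer rather than returning strings, which is why the tests above wrap each call in PerformActionWithOutput.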
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
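The alignment problem described here comes down to standard align-up arithmetic on stack offsets. As a generic illustration only (not the JIT's actual implementation), rounding an offset up to a power-of-two boundary looks like this:

    using System;

    class AlignDemo
    {
        // Round offset up to the next multiple of a power-of-two alignment.
        static int AlignUp(int offset, int alignment) =>
            (offset + alignment - 1) & ~(alignment - 1);

        static void Main()
        {
            Console.WriteLine(AlignUp(4, 8)); // 8: a 4-byte slot followed by an 8-byte-aligned value
            Console.WriteLine(AlignUp(8, 8)); // 8: already aligned
            Console.WriteLine(AlignUp(9, 4)); // 12
        }
    }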
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/arm64/gmscpu.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /**************************************************************/ /* gmscpu.h */ /**************************************************************/ /* HelperFrame defines the 'GET_STATE(machState)' macro, which figures out what the state of the machine will be when the current method returns. It then stores the state in the JIT_machState structure. */ /**************************************************************/ #ifndef __gmscpu_h__ #define __gmscpu_h__ #define __gmscpu_h__ // X19 - X29 #define NUM_NONVOLATILE_CONTEXT_POINTERS 11 struct MachState { ULONG64 captureX19_X29[NUM_NONVOLATILE_CONTEXT_POINTERS]; // preserved registers PTR_ULONG64 ptrX19_X29[NUM_NONVOLATILE_CONTEXT_POINTERS]; // pointers to preserved registers TADDR _pc; // program counter after the function returns TADDR _sp; // stack pointer after the function returns BOOL _isValid; BOOL isValid() { LIMITED_METHOD_DAC_CONTRACT; return _isValid; } TADDR GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; return _pc; } }; struct LazyMachState : public MachState{ TADDR captureSp; // Stack pointer at the time of capture TADDR captureIp; // Instruction pointer at the time of capture void setLazyStateFromUnwind(MachState* copy); static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, int funCallDepth = 1, HostCallPreference hostCallPreference = AllowHostCalls); }; inline void LazyMachState::setLazyStateFromUnwind(MachState* copy) { #if defined(DACCESS_COMPILE) // This function cannot be called in DAC because DAC cannot update target memory. DacError(E_FAIL); return; #else // !DACCESS_COMPILE _sp = copy->_sp; _pc = copy->_pc; // Capture* has already been set, so there is no need to touch it // loop over the nonvolatile context pointers and make // sure to properly copy interior pointers into the // new struct PULONG64* pSrc = (PULONG64 *)&copy->ptrX19_X29; PULONG64* pDst = (PULONG64 *)&this->ptrX19_X29; const PULONG64 LowerBoundDst = (PULONG64) this; const PULONG64 LowerBoundSrc = (PULONG64) copy; const PULONG64 UpperBoundSrc = (PULONG64) ((BYTE*)LowerBoundSrc + sizeof(*copy)); for (int i = 0; i < NUM_NONVOLATILE_CONTEXT_POINTERS; i++) { PULONG64 valueSrc = *pSrc++; if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc)) { // make any pointer interior to 'src' interior to 'dst' valueSrc = (PULONG64)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst); } *pDst++ = valueSrc; captureX19_X29[i] = copy->captureX19_X29[i]; } // this has to be last because we depend on write ordering to // synchronize the race implicit in updating this struct VolatileStore(&_isValid, TRUE); #endif // DACCESS_COMPILE } // Do the initial capture of the machine state. This is meant to be // as light weight as possible, as we may never need the state that // we capture. EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState); #define CAPTURE_STATE(machState, ret) \ LazyMachStateCaptureState(machState) #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /**************************************************************/ /* gmscpu.h */ /**************************************************************/ /* HelperFrame defines the 'GET_STATE(machState)' macro, which figures out what the state of the machine will be when the current method returns. It then stores the state in the JIT_machState structure. */ /**************************************************************/ #ifndef __gmscpu_h__ #define __gmscpu_h__ #define __gmscpu_h__ // X19 - X29 #define NUM_NONVOLATILE_CONTEXT_POINTERS 11 struct MachState { ULONG64 captureX19_X29[NUM_NONVOLATILE_CONTEXT_POINTERS]; // preserved registers PTR_ULONG64 ptrX19_X29[NUM_NONVOLATILE_CONTEXT_POINTERS]; // pointers to preserved registers TADDR _pc; // program counter after the function returns TADDR _sp; // stack pointer after the function returns BOOL _isValid; BOOL isValid() { LIMITED_METHOD_DAC_CONTRACT; return _isValid; } TADDR GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; return _pc; } }; struct LazyMachState : public MachState{ TADDR captureSp; // Stack pointer at the time of capture TADDR captureIp; // Instruction pointer at the time of capture void setLazyStateFromUnwind(MachState* copy); static void unwindLazyState(LazyMachState* baseState, MachState* lazyState, DWORD threadId, int funCallDepth = 1, HostCallPreference hostCallPreference = AllowHostCalls); }; inline void LazyMachState::setLazyStateFromUnwind(MachState* copy) { #if defined(DACCESS_COMPILE) // This function cannot be called in DAC because DAC cannot update target memory. DacError(E_FAIL); return; #else // !DACCESS_COMPILE _sp = copy->_sp; _pc = copy->_pc; // Capture* has already been set, so there is no need to touch it // loop over the nonvolatile context pointers and make // sure to properly copy interior pointers into the // new struct PULONG64* pSrc = (PULONG64 *)&copy->ptrX19_X29; PULONG64* pDst = (PULONG64 *)&this->ptrX19_X29; const PULONG64 LowerBoundDst = (PULONG64) this; const PULONG64 LowerBoundSrc = (PULONG64) copy; const PULONG64 UpperBoundSrc = (PULONG64) ((BYTE*)LowerBoundSrc + sizeof(*copy)); for (int i = 0; i < NUM_NONVOLATILE_CONTEXT_POINTERS; i++) { PULONG64 valueSrc = *pSrc++; if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc)) { // make any pointer interior to 'src' interior to 'dst' valueSrc = (PULONG64)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst); } *pDst++ = valueSrc; captureX19_X29[i] = copy->captureX19_X29[i]; } // this has to be last because we depend on write ordering to // synchronize the race implicit in updating this struct VolatileStore(&_isValid, TRUE); #endif // DACCESS_COMPILE } // Do the initial capture of the machine state. This is meant to be // as light weight as possible, as we may never need the state that // we capture. EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState); #define CAPTURE_STATE(machState, ret) \ LazyMachStateCaptureState(machState) #endif
-1
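The setLazyStateFromUnwind routine above rebases any pointer that lands inside the source MachState so it points at the same offset inside the destination copy. A hedged C# analogue of just that interior-pointer check (illustrative addresses, not runtime code):

    using System;

    class InteriorPointerDemo
    {
        // If value points inside [srcBase, srcBase + size), return the pointer at
        // the same offset inside the destination block; otherwise leave it alone.
        static IntPtr Rebase(IntPtr value, IntPtr srcBase, int size, IntPtr dstBase)
        {
            long delta = value.ToInt64() - srcBase.ToInt64();
            return (delta >= 0 && delta < size)
                ? new IntPtr(dstBase.ToInt64() + delta)
                : value;
        }

        static void Main()
        {
            IntPtr src = new IntPtr(0x1000), dst = new IntPtr(0x2000);
            Console.WriteLine(Rebase(new IntPtr(0x1010), src, 0x100, dst)); // 8208  (0x2010): rebased
            Console.WriteLine(Rebase(new IntPtr(0x9000), src, 0x100, dst)); // 36864 (0x9000): untouched
        }
    }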
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/Asn1/CertificateTemplateAsn.xml.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma warning disable SA1028 // ignore whitespace warnings for generated code using System; using System.Formats.Asn1; using System.Runtime.InteropServices; namespace System.Security.Cryptography.X509Certificates.Asn1 { [StructLayout(LayoutKind.Sequential)] internal partial struct CertificateTemplateAsn { internal string TemplateID; internal int TemplateMajorVersion; internal int? TemplateMinorVersion; internal void Encode(AsnWriter writer) { Encode(writer, Asn1Tag.Sequence); } internal void Encode(AsnWriter writer, Asn1Tag tag) { writer.PushSequence(tag); try { writer.WriteObjectIdentifier(TemplateID); } catch (ArgumentException e) { throw new CryptographicException(SR.Cryptography_Der_Invalid_Encoding, e); } writer.WriteInteger(TemplateMajorVersion); if (TemplateMinorVersion.HasValue) { writer.WriteInteger(TemplateMinorVersion.Value); } writer.PopSequence(tag); } internal static CertificateTemplateAsn Decode(ReadOnlyMemory<byte> encoded, AsnEncodingRules ruleSet) { return Decode(Asn1Tag.Sequence, encoded, ruleSet); } internal static CertificateTemplateAsn Decode(Asn1Tag expectedTag, ReadOnlyMemory<byte> encoded, AsnEncodingRules ruleSet) { try { AsnValueReader reader = new AsnValueReader(encoded.Span, ruleSet); DecodeCore(ref reader, expectedTag, encoded, out CertificateTemplateAsn decoded); reader.ThrowIfNotEmpty(); return decoded; } catch (AsnContentException e) { throw new CryptographicException(SR.Cryptography_Der_Invalid_Encoding, e); } } internal static void Decode(ref AsnValueReader reader, ReadOnlyMemory<byte> rebind, out CertificateTemplateAsn decoded) { Decode(ref reader, Asn1Tag.Sequence, rebind, out decoded); } internal static void Decode(ref AsnValueReader reader, Asn1Tag expectedTag, ReadOnlyMemory<byte> rebind, out CertificateTemplateAsn decoded) { try { DecodeCore(ref reader, expectedTag, rebind, out decoded); } catch (AsnContentException e) { throw new CryptographicException(SR.Cryptography_Der_Invalid_Encoding, e); } } private static void DecodeCore(ref AsnValueReader reader, Asn1Tag expectedTag, ReadOnlyMemory<byte> rebind, out CertificateTemplateAsn decoded) { decoded = default; AsnValueReader sequenceReader = reader.ReadSequence(expectedTag); decoded.TemplateID = sequenceReader.ReadObjectIdentifier(); if (!sequenceReader.TryReadInt32(out decoded.TemplateMajorVersion)) { sequenceReader.ThrowIfNotEmpty(); } if (sequenceReader.HasData && sequenceReader.PeekTag().HasSameClassAndValue(Asn1Tag.Integer)) { if (sequenceReader.TryReadInt32(out int tmpTemplateMinorVersion)) { decoded.TemplateMinorVersion = tmpTemplateMinorVersion; } else { sequenceReader.ThrowIfNotEmpty(); } } sequenceReader.ThrowIfNotEmpty(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma warning disable SA1028 // ignore whitespace warnings for generated code using System; using System.Formats.Asn1; using System.Runtime.InteropServices; namespace System.Security.Cryptography.X509Certificates.Asn1 { [StructLayout(LayoutKind.Sequential)] internal partial struct CertificateTemplateAsn { internal string TemplateID; internal int TemplateMajorVersion; internal int? TemplateMinorVersion; internal void Encode(AsnWriter writer) { Encode(writer, Asn1Tag.Sequence); } internal void Encode(AsnWriter writer, Asn1Tag tag) { writer.PushSequence(tag); try { writer.WriteObjectIdentifier(TemplateID); } catch (ArgumentException e) { throw new CryptographicException(SR.Cryptography_Der_Invalid_Encoding, e); } writer.WriteInteger(TemplateMajorVersion); if (TemplateMinorVersion.HasValue) { writer.WriteInteger(TemplateMinorVersion.Value); } writer.PopSequence(tag); } internal static CertificateTemplateAsn Decode(ReadOnlyMemory<byte> encoded, AsnEncodingRules ruleSet) { return Decode(Asn1Tag.Sequence, encoded, ruleSet); } internal static CertificateTemplateAsn Decode(Asn1Tag expectedTag, ReadOnlyMemory<byte> encoded, AsnEncodingRules ruleSet) { try { AsnValueReader reader = new AsnValueReader(encoded.Span, ruleSet); DecodeCore(ref reader, expectedTag, encoded, out CertificateTemplateAsn decoded); reader.ThrowIfNotEmpty(); return decoded; } catch (AsnContentException e) { throw new CryptographicException(SR.Cryptography_Der_Invalid_Encoding, e); } } internal static void Decode(ref AsnValueReader reader, ReadOnlyMemory<byte> rebind, out CertificateTemplateAsn decoded) { Decode(ref reader, Asn1Tag.Sequence, rebind, out decoded); } internal static void Decode(ref AsnValueReader reader, Asn1Tag expectedTag, ReadOnlyMemory<byte> rebind, out CertificateTemplateAsn decoded) { try { DecodeCore(ref reader, expectedTag, rebind, out decoded); } catch (AsnContentException e) { throw new CryptographicException(SR.Cryptography_Der_Invalid_Encoding, e); } } private static void DecodeCore(ref AsnValueReader reader, Asn1Tag expectedTag, ReadOnlyMemory<byte> rebind, out CertificateTemplateAsn decoded) { decoded = default; AsnValueReader sequenceReader = reader.ReadSequence(expectedTag); decoded.TemplateID = sequenceReader.ReadObjectIdentifier(); if (!sequenceReader.TryReadInt32(out decoded.TemplateMajorVersion)) { sequenceReader.ThrowIfNotEmpty(); } if (sequenceReader.HasData && sequenceReader.PeekTag().HasSameClassAndValue(Asn1Tag.Integer)) { if (sequenceReader.TryReadInt32(out int tmpTemplateMinorVersion)) { decoded.TemplateMinorVersion = tmpTemplateMinorVersion; } else { sequenceReader.ThrowIfNotEmpty(); } } sequenceReader.ThrowIfNotEmpty(); } } }
-1
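The generated CertificateTemplateAsn code above wraps System.Formats.Asn1 writers and readers. A minimal round trip with that API, mirroring its OID-plus-INTEGER SEQUENCE shape (the OID and version are sample placeholders):

    using System;
    using System.Formats.Asn1;

    class AsnRoundTrip
    {
        static void Main()
        {
            var writer = new AsnWriter(AsnEncodingRules.DER);
            writer.PushSequence();
            writer.WriteObjectIdentifier("1.3.6.1.4.1.311.21.8"); // sample template OID
            writer.WriteInteger(100);                             // major version
            writer.PopSequence();
            byte[] encoded = writer.Encode();

            var reader = new AsnReader(encoded, AsnEncodingRules.DER);
            AsnReader seq = reader.ReadSequence();
            string oid = seq.ReadObjectIdentifier();
            seq.TryReadInt32(out int major);
            seq.ThrowIfNotEmpty();
            Console.WriteLine($"{oid} v{major}"); // 1.3.6.1.4.1.311.21.8 v100
        }
    }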
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/mono/wasm/debugger/BrowserDebugHost/Startup.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Net.Http; using System.Text.Json; using System.Threading.Tasks; using Microsoft.AspNetCore.Builder; using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Routing; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using Microsoft.Extensions.Primitives; namespace Microsoft.WebAssembly.Diagnostics { internal class Startup { // This method gets called by the runtime. Use this method to add services to the container. // For more information on how to configure your application, visit https://go.microsoft.com/fwlink/?LinkID=398940 public void ConfigureServices(IServiceCollection services) => services.AddRouting() .Configure<ProxyOptions>(Configuration); public Startup(IConfiguration configuration) => Configuration = configuration; public IConfiguration Configuration { get; } // This method gets called by the runtime. Use this method to configure the HTTP request pipeline. public void Configure(IApplicationBuilder app, IOptionsMonitor<ProxyOptions> optionsAccessor, IWebHostEnvironment env, IHostApplicationLifetime applicationLifetime) { ProxyOptions options = optionsAccessor.CurrentValue; if (options.OwnerPid.HasValue) { Process ownerProcess = Process.GetProcessById(options.OwnerPid.Value); if (ownerProcess != null) { ownerProcess.EnableRaisingEvents = true; ownerProcess.Exited += (sender, eventArgs) => { applicationLifetime.StopApplication(); }; } } app.UseDeveloperExceptionPage() .UseWebSockets() .UseDebugProxy(options); } } internal static class DebugExtensions { public static Dictionary<string, string> MapValues(Dictionary<string, string> response, HttpContext context, Uri debuggerHost) { var filtered = new Dictionary<string, string>(); HttpRequest request = context.Request; foreach (string key in response.Keys) { switch (key) { case "devtoolsFrontendUrl": string front = response[key]; filtered[key] = $"{debuggerHost.Scheme}://{debuggerHost.Authority}{front.Replace($"ws={debuggerHost.Authority}", $"ws={request.Host}")}"; break; case "webSocketDebuggerUrl": var page = new Uri(response[key]); filtered[key] = $"{page.Scheme}://{request.Host}{page.PathAndQuery}"; break; default: filtered[key] = response[key]; break; } } return filtered; } public static IApplicationBuilder UseDebugProxy(this IApplicationBuilder app, ProxyOptions options) => UseDebugProxy(app, options, MapValues); public static IApplicationBuilder UseDebugProxy( this IApplicationBuilder app, ProxyOptions options, Func<Dictionary<string, string>, HttpContext, Uri, Dictionary<string, string>> mapFunc) { Uri devToolsHost = options.DevToolsUrl; app.UseRouter(router => { router.MapGet("/", Copy); router.MapGet("/favicon.ico", Copy); router.MapGet("json", RewriteArray); router.MapGet("json/list", RewriteArray); router.MapGet("json/version", RewriteSingle); router.MapGet("json/new", RewriteSingle); router.MapGet("devtools/page/{pageId}", ConnectProxy); router.MapGet("devtools/browser/{pageId}", ConnectProxy); string GetEndpoint(HttpContext context) { HttpRequest request = context.Request; PathString requestPath = request.Path; return 
$"{devToolsHost.Scheme}://{devToolsHost.Authority}{request.Path}{request.QueryString}"; } async Task Copy(HttpContext context) { using (var httpClient = new HttpClient { Timeout = TimeSpan.FromSeconds(5) }) { HttpResponseMessage response = await httpClient.GetAsync(GetEndpoint(context)); context.Response.ContentType = response.Content.Headers.ContentType.ToString(); if ((response.Content.Headers.ContentLength ?? 0) > 0) context.Response.ContentLength = response.Content.Headers.ContentLength; byte[] bytes = await response.Content.ReadAsByteArrayAsync(); await context.Response.Body.WriteAsync(bytes); } } async Task RewriteSingle(HttpContext context) { Dictionary<string, string> version = await ProxyGetJsonAsync<Dictionary<string, string>>(GetEndpoint(context)); context.Response.ContentType = "application/json"; await context.Response.WriteAsync( JsonSerializer.Serialize(mapFunc(version, context, devToolsHost))); } async Task RewriteArray(HttpContext context) { Dictionary<string, string>[] tabs = await ProxyGetJsonAsync<Dictionary<string, string>[]>(GetEndpoint(context)); Dictionary<string, string>[] alteredTabs = tabs.Select(t => mapFunc(t, context, devToolsHost)).ToArray(); context.Response.ContentType = "application/json"; string text = JsonSerializer.Serialize(alteredTabs); context.Response.ContentLength = text.Length; await context.Response.WriteAsync(text); } async Task ConnectProxy(HttpContext context) { if (!context.WebSockets.IsWebSocketRequest) { context.Response.StatusCode = 400; return; } var endpoint = new Uri($"ws://{devToolsHost.Authority}{context.Request.Path}"); int runtimeId = 0; if (context.Request.Query.TryGetValue("RuntimeId", out StringValues runtimeIdValue) && int.TryParse(runtimeIdValue.FirstOrDefault(), out int parsedId)) { runtimeId = parsedId; } try { using ILoggerFactory loggerFactory = LoggerFactory.Create(builder => builder.AddSimpleConsole(options => { options.SingleLine = true; options.TimestampFormat = "[HH:mm:ss] "; }) .AddFilter(null, LogLevel.Information) ); context.Request.Query.TryGetValue("urlSymbolServer", out StringValues urlSymbolServerList); var proxy = new DebuggerProxy(loggerFactory, urlSymbolServerList.ToList(), runtimeId); System.Net.WebSockets.WebSocket ideSocket = await context.WebSockets.AcceptWebSocketAsync(); await proxy.Run(endpoint, ideSocket); } catch (Exception e) { Console.WriteLine("got exception {0}", e); } } }); return app; } private static async Task<T> ProxyGetJsonAsync<T>(string url) { using (var httpClient = new HttpClient()) { HttpResponseMessage response = await httpClient.GetAsync(url); return await JsonSerializer.DeserializeAsync<T>(await response.Content.ReadAsStreamAsync()); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Net.Http; using System.Text.Json; using System.Threading.Tasks; using Microsoft.AspNetCore.Builder; using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Routing; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using Microsoft.Extensions.Primitives; namespace Microsoft.WebAssembly.Diagnostics { internal class Startup { // This method gets called by the runtime. Use this method to add services to the container. // For more information on how to configure your application, visit https://go.microsoft.com/fwlink/?LinkID=398940 public void ConfigureServices(IServiceCollection services) => services.AddRouting() .Configure<ProxyOptions>(Configuration); public Startup(IConfiguration configuration) => Configuration = configuration; public IConfiguration Configuration { get; } // This method gets called by the runtime. Use this method to configure the HTTP request pipeline. public void Configure(IApplicationBuilder app, IOptionsMonitor<ProxyOptions> optionsAccessor, IWebHostEnvironment env, IHostApplicationLifetime applicationLifetime) { ProxyOptions options = optionsAccessor.CurrentValue; if (options.OwnerPid.HasValue) { Process ownerProcess = Process.GetProcessById(options.OwnerPid.Value); if (ownerProcess != null) { ownerProcess.EnableRaisingEvents = true; ownerProcess.Exited += (sender, eventArgs) => { applicationLifetime.StopApplication(); }; } } app.UseDeveloperExceptionPage() .UseWebSockets() .UseDebugProxy(options); } } internal static class DebugExtensions { public static Dictionary<string, string> MapValues(Dictionary<string, string> response, HttpContext context, Uri debuggerHost) { var filtered = new Dictionary<string, string>(); HttpRequest request = context.Request; foreach (string key in response.Keys) { switch (key) { case "devtoolsFrontendUrl": string front = response[key]; filtered[key] = $"{debuggerHost.Scheme}://{debuggerHost.Authority}{front.Replace($"ws={debuggerHost.Authority}", $"ws={request.Host}")}"; break; case "webSocketDebuggerUrl": var page = new Uri(response[key]); filtered[key] = $"{page.Scheme}://{request.Host}{page.PathAndQuery}"; break; default: filtered[key] = response[key]; break; } } return filtered; } public static IApplicationBuilder UseDebugProxy(this IApplicationBuilder app, ProxyOptions options) => UseDebugProxy(app, options, MapValues); public static IApplicationBuilder UseDebugProxy( this IApplicationBuilder app, ProxyOptions options, Func<Dictionary<string, string>, HttpContext, Uri, Dictionary<string, string>> mapFunc) { Uri devToolsHost = options.DevToolsUrl; app.UseRouter(router => { router.MapGet("/", Copy); router.MapGet("/favicon.ico", Copy); router.MapGet("json", RewriteArray); router.MapGet("json/list", RewriteArray); router.MapGet("json/version", RewriteSingle); router.MapGet("json/new", RewriteSingle); router.MapGet("devtools/page/{pageId}", ConnectProxy); router.MapGet("devtools/browser/{pageId}", ConnectProxy); string GetEndpoint(HttpContext context) { HttpRequest request = context.Request; PathString requestPath = request.Path; return 
$"{devToolsHost.Scheme}://{devToolsHost.Authority}{request.Path}{request.QueryString}"; } async Task Copy(HttpContext context) { using (var httpClient = new HttpClient { Timeout = TimeSpan.FromSeconds(5) }) { HttpResponseMessage response = await httpClient.GetAsync(GetEndpoint(context)); context.Response.ContentType = response.Content.Headers.ContentType.ToString(); if ((response.Content.Headers.ContentLength ?? 0) > 0) context.Response.ContentLength = response.Content.Headers.ContentLength; byte[] bytes = await response.Content.ReadAsByteArrayAsync(); await context.Response.Body.WriteAsync(bytes); } } async Task RewriteSingle(HttpContext context) { Dictionary<string, string> version = await ProxyGetJsonAsync<Dictionary<string, string>>(GetEndpoint(context)); context.Response.ContentType = "application/json"; await context.Response.WriteAsync( JsonSerializer.Serialize(mapFunc(version, context, devToolsHost))); } async Task RewriteArray(HttpContext context) { Dictionary<string, string>[] tabs = await ProxyGetJsonAsync<Dictionary<string, string>[]>(GetEndpoint(context)); Dictionary<string, string>[] alteredTabs = tabs.Select(t => mapFunc(t, context, devToolsHost)).ToArray(); context.Response.ContentType = "application/json"; string text = JsonSerializer.Serialize(alteredTabs); context.Response.ContentLength = text.Length; await context.Response.WriteAsync(text); } async Task ConnectProxy(HttpContext context) { if (!context.WebSockets.IsWebSocketRequest) { context.Response.StatusCode = 400; return; } var endpoint = new Uri($"ws://{devToolsHost.Authority}{context.Request.Path}"); int runtimeId = 0; if (context.Request.Query.TryGetValue("RuntimeId", out StringValues runtimeIdValue) && int.TryParse(runtimeIdValue.FirstOrDefault(), out int parsedId)) { runtimeId = parsedId; } try { using ILoggerFactory loggerFactory = LoggerFactory.Create(builder => builder.AddSimpleConsole(options => { options.SingleLine = true; options.TimestampFormat = "[HH:mm:ss] "; }) .AddFilter(null, LogLevel.Information) ); context.Request.Query.TryGetValue("urlSymbolServer", out StringValues urlSymbolServerList); var proxy = new DebuggerProxy(loggerFactory, urlSymbolServerList.ToList(), runtimeId); System.Net.WebSockets.WebSocket ideSocket = await context.WebSockets.AcceptWebSocketAsync(); await proxy.Run(endpoint, ideSocket); } catch (Exception e) { Console.WriteLine("got exception {0}", e); } } }); return app; } private static async Task<T> ProxyGetJsonAsync<T>(string url) { using (var httpClient = new HttpClient()) { HttpResponseMessage response = await httpClient.GetAsync(url); return await JsonSerializer.DeserializeAsync<T>(await response.Content.ReadAsStreamAsync()); } } } }
-1
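The debug proxy above rewrites the DevTools JSON endpoints so that websocket URLs point at the proxy host instead of the browser's. The core rewrite is a one-liner on Uri; a sketch with hypothetical hosts:

    using System;

    class WsRewrite
    {
        // Keep the browser's path and query, but point the host at the proxy.
        static string Rewrite(string browserWsUrl, string proxyHost)
        {
            var page = new Uri(browserWsUrl);
            return $"{page.Scheme}://{proxyHost}{page.PathAndQuery}";
        }

        static void Main()
        {
            // prints: ws://localhost:9300/devtools/page/AB12
            Console.WriteLine(Rewrite("ws://127.0.0.1:9222/devtools/page/AB12", "localhost:9300"));
        }
    }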
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/md/tables/export.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: export.h // // // Popular types defined in MetaData\Tables directory. // It's supposed to be included from other (MetaData) subcomponents, not from this directory. // // ====================================================================================== #pragma once #include "table.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: export.h // // // Popular types defined in MetaData\Tables directory. // It's supposed to be included from other (MetaData) subcomponents, not from this directory. // // ====================================================================================== #pragma once #include "table.h"
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Security.Cryptography/tests/PemEncodingTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Security.Cryptography.Tests { public static class PemEncodingTests { [Fact] public static void GetEncodedSize_Empty() { int size = PemEncoding.GetEncodedSize(labelLength: 0, dataLength: 0); Assert.Equal(31, size); } [Theory] [InlineData(1, 0, 33)] [InlineData(1, 1, 38)] [InlineData(16, 2048, 2838)] public static void GetEncodedSize_Simple(int labelLength, int dataLength, int expectedSize) { int size = PemEncoding.GetEncodedSize(labelLength, dataLength); Assert.Equal(expectedSize, size); } [Theory] [InlineData(1_073_741_808, 0, int.MaxValue)] [InlineData(1_073_741_805, 1, int.MaxValue - 1)] [InlineData(0, 1_585_834_053, int.MaxValue - 2)] [InlineData(1, 1_585_834_053, int.MaxValue)] public static void GetEncodedSize_Boundaries(int labelLength, int dataLength, int expectedSize) { int size = PemEncoding.GetEncodedSize(labelLength, dataLength); Assert.Equal(expectedSize, size); } [Fact] public static void GetEncodedSize_LabelLength_Overflow() { AssertExtensions.Throws<ArgumentOutOfRangeException>("labelLength", () => PemEncoding.GetEncodedSize(labelLength: 1_073_741_809, dataLength: 0)); } [Fact] public static void GetEncodedSize_DataLength_Overflow() { AssertExtensions.Throws<ArgumentOutOfRangeException>("dataLength", () => PemEncoding.GetEncodedSize(labelLength: 0, dataLength: 1_585_834_054)); } [Fact] public static void GetEncodedSize_Combined_Overflow() { Assert.Throws<ArgumentException>( () => PemEncoding.GetEncodedSize(labelLength: 2, dataLength: 1_585_834_052)); } [Fact] public static void GetEncodedSize_DataLength_Negative() { AssertExtensions.Throws<ArgumentOutOfRangeException>("dataLength", () => PemEncoding.GetEncodedSize(labelLength: 0, dataLength: -1)); } [Fact] public static void GetEncodedSize_LabelLength_Negative() { AssertExtensions.Throws<ArgumentOutOfRangeException>("labelLength", () => PemEncoding.GetEncodedSize(labelLength: -1, dataLength: 0)); } [Fact] public static void TryWrite_Simple() { char[] buffer = new char[1000]; string label = "HELLO"; byte[] content = new byte[] { 0x66, 0x6F, 0x6F }; Assert.True(PemEncoding.TryWrite(label, content, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); Assert.Equal("-----BEGIN HELLO-----\nZm9v\n-----END HELLO-----", pem); } [Fact] public static void Write_Simple() { string label = "HELLO"; byte[] content = new byte[] { 0x66, 0x6F, 0x6F }; char[] result = PemEncoding.Write(label, content); string pem = new string(result); Assert.Equal("-----BEGIN HELLO-----\nZm9v\n-----END HELLO-----", pem); } [Fact] public static void TryWrite_Empty() { char[] buffer = new char[31]; Assert.True(PemEncoding.TryWrite(default, default, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); Assert.Equal("-----BEGIN -----\n-----END -----", pem); } [Fact] public static void Write_Empty() { char[] result = PemEncoding.Write(default, default); string pem = new string(result); Assert.Equal("-----BEGIN -----\n-----END -----", pem); } [Fact] public static void TryWrite_BufferTooSmall() { char[] buffer = new char[30]; Assert.False(PemEncoding.TryWrite(default, default, buffer, out _)); } [Fact] public static void TryWrite_ExactLineNoPadding() { char[] buffer = new char[1000]; ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 
2, 3, 4, 5, 6, 7 }; string label = "FANCY DATA"; Assert.True(PemEncoding.TryWrite(label, data, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); string expected = "-----BEGIN FANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "-----END FANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void Write_ExactLineNoPadding() { ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7 }; string label = "FANCY DATA"; char[] result = PemEncoding.Write(label, data); string pem = new string(result); string expected = "-----BEGIN FANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "-----END FANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_DoesNotWriteOutsideBounds() { Span<char> buffer = new char[1000]; buffer.Fill('!'); ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7 }; Span<char> write = buffer[10..]; string label = "FANCY DATA"; Assert.True(PemEncoding.TryWrite(label, data, write, out int charsWritten)); string pem = new string(buffer[..(charsWritten + 20)]); string expected = "!!!!!!!!!!-----BEGIN FANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "-----END FANCY DATA-----!!!!!!!!!!"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_WrapPadding() { char[] buffer = new char[1000]; ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; string label = "UNFANCY DATA"; Assert.True(PemEncoding.TryWrite(label, data, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); string expected = "-----BEGIN UNFANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "CAk=\n" + "-----END UNFANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void Write_WrapPadding() { ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; string label = "UNFANCY DATA"; char[] result = PemEncoding.Write(label, data); string pem = new string(result); string expected = "-----BEGIN UNFANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "CAk=\n" + "-----END UNFANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_EcKey() { char[] buffer = new char[1000]; ReadOnlySpan<byte> data = new byte[] { 0x30, 0x74, 0x02, 0x01, 0x01, 0x04, 0x20, 0x20, 0x59, 0xef, 0xff, 0x13, 0xd4, 0x92, 0xf6, 0x6a, 0x6b, 0xcd, 0x07, 0xf4, 0x12, 0x86, 0x08, 0x6d, 0x81, 0x93, 0xed, 0x9c, 0xf0, 0xf8, 0x5b, 0xeb, 0x00, 0x70, 0x7c, 0x40, 0xfa, 0x12, 0x6c, 0xa0, 0x07, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xdf, 0x23, 0x42, 0xe5, 0xab, 0x3c, 0x25, 0x53, 0x79, 0x32, 0x31, 0x7d, 0xe6, 0x87, 0xcd, 0x4a, 0x04, 0x41, 0x55, 0x78, 0xdf, 0xd0, 0x22, 0xad, 0x60, 0x44, 0x96, 0x7c, 0xf9, 0xe6, 0xbd, 0x3d, 0xe7, 0xf9, 0xc3, 0x0c, 0x25, 0x40, 0x7d, 0x95, 0x42, 0x5f, 0x76, 0x41, 0x4d, 0x81, 0xa4, 0x81, 0xec, 0x99, 0x41, 0xfa, 0x4a, 0xd9, 0x55, 0x55, 0x7c, 0x4f, 
0xb1, 0xd9, 0x41, 0x75, 0x43, 0x44 }; string label = "EC PRIVATE KEY"; Assert.True(PemEncoding.TryWrite(label, data, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); string expected = "-----BEGIN EC PRIVATE KEY-----\n" + "MHQCAQEEICBZ7/8T1JL2amvNB/QShghtgZPtnPD4W+sAcHxA+hJsoAcGBSuBBAAK\n" + "oUQDQgAE3yNC5as8JVN5MjF95ofNSgRBVXjf0CKtYESWfPnmvT3n+cMMJUB9lUJf\n" + "dkFNgaSB7JlB+krZVVV8T7HZQXVDRA==\n" + "-----END EC PRIVATE KEY-----"; Assert.Equal(expected, pem); } [Fact] public static void Write_EcKey() { ReadOnlySpan<byte> data = new byte[] { 0x30, 0x74, 0x02, 0x01, 0x01, 0x04, 0x20, 0x20, 0x59, 0xef, 0xff, 0x13, 0xd4, 0x92, 0xf6, 0x6a, 0x6b, 0xcd, 0x07, 0xf4, 0x12, 0x86, 0x08, 0x6d, 0x81, 0x93, 0xed, 0x9c, 0xf0, 0xf8, 0x5b, 0xeb, 0x00, 0x70, 0x7c, 0x40, 0xfa, 0x12, 0x6c, 0xa0, 0x07, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xdf, 0x23, 0x42, 0xe5, 0xab, 0x3c, 0x25, 0x53, 0x79, 0x32, 0x31, 0x7d, 0xe6, 0x87, 0xcd, 0x4a, 0x04, 0x41, 0x55, 0x78, 0xdf, 0xd0, 0x22, 0xad, 0x60, 0x44, 0x96, 0x7c, 0xf9, 0xe6, 0xbd, 0x3d, 0xe7, 0xf9, 0xc3, 0x0c, 0x25, 0x40, 0x7d, 0x95, 0x42, 0x5f, 0x76, 0x41, 0x4d, 0x81, 0xa4, 0x81, 0xec, 0x99, 0x41, 0xfa, 0x4a, 0xd9, 0x55, 0x55, 0x7c, 0x4f, 0xb1, 0xd9, 0x41, 0x75, 0x43, 0x44 }; string label = "EC PRIVATE KEY"; char[] result = PemEncoding.Write(label, data); string pem = new string(result); string expected = "-----BEGIN EC PRIVATE KEY-----\n" + "MHQCAQEEICBZ7/8T1JL2amvNB/QShghtgZPtnPD4W+sAcHxA+hJsoAcGBSuBBAAK\n" + "oUQDQgAE3yNC5as8JVN5MjF95ofNSgRBVXjf0CKtYESWfPnmvT3n+cMMJUB9lUJf\n" + "dkFNgaSB7JlB+krZVVV8T7HZQXVDRA==\n" + "-----END EC PRIVATE KEY-----"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_Throws_InvalidLabel() { char[] buffer = new char[50]; AssertExtensions.Throws<ArgumentException>("label", () => PemEncoding.TryWrite("\n", default, buffer, out _)); } [Fact] public static void Write_Throws_InvalidLabel() { AssertExtensions.Throws<ArgumentException>("label", () => PemEncoding.Write("\n", default)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Security.Cryptography.Tests { public static class PemEncodingTests { [Fact] public static void GetEncodedSize_Empty() { int size = PemEncoding.GetEncodedSize(labelLength: 0, dataLength: 0); Assert.Equal(31, size); } [Theory] [InlineData(1, 0, 33)] [InlineData(1, 1, 38)] [InlineData(16, 2048, 2838)] public static void GetEncodedSize_Simple(int labelLength, int dataLength, int expectedSize) { int size = PemEncoding.GetEncodedSize(labelLength, dataLength); Assert.Equal(expectedSize, size); } [Theory] [InlineData(1_073_741_808, 0, int.MaxValue)] [InlineData(1_073_741_805, 1, int.MaxValue - 1)] [InlineData(0, 1_585_834_053, int.MaxValue - 2)] [InlineData(1, 1_585_834_053, int.MaxValue)] public static void GetEncodedSize_Boundaries(int labelLength, int dataLength, int expectedSize) { int size = PemEncoding.GetEncodedSize(labelLength, dataLength); Assert.Equal(expectedSize, size); } [Fact] public static void GetEncodedSize_LabelLength_Overflow() { AssertExtensions.Throws<ArgumentOutOfRangeException>("labelLength", () => PemEncoding.GetEncodedSize(labelLength: 1_073_741_809, dataLength: 0)); } [Fact] public static void GetEncodedSize_DataLength_Overflow() { AssertExtensions.Throws<ArgumentOutOfRangeException>("dataLength", () => PemEncoding.GetEncodedSize(labelLength: 0, dataLength: 1_585_834_054)); } [Fact] public static void GetEncodedSize_Combined_Overflow() { Assert.Throws<ArgumentException>( () => PemEncoding.GetEncodedSize(labelLength: 2, dataLength: 1_585_834_052)); } [Fact] public static void GetEncodedSize_DataLength_Negative() { AssertExtensions.Throws<ArgumentOutOfRangeException>("dataLength", () => PemEncoding.GetEncodedSize(labelLength: 0, dataLength: -1)); } [Fact] public static void GetEncodedSize_LabelLength_Negative() { AssertExtensions.Throws<ArgumentOutOfRangeException>("labelLength", () => PemEncoding.GetEncodedSize(labelLength: -1, dataLength: 0)); } [Fact] public static void TryWrite_Simple() { char[] buffer = new char[1000]; string label = "HELLO"; byte[] content = new byte[] { 0x66, 0x6F, 0x6F }; Assert.True(PemEncoding.TryWrite(label, content, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); Assert.Equal("-----BEGIN HELLO-----\nZm9v\n-----END HELLO-----", pem); } [Fact] public static void Write_Simple() { string label = "HELLO"; byte[] content = new byte[] { 0x66, 0x6F, 0x6F }; char[] result = PemEncoding.Write(label, content); string pem = new string(result); Assert.Equal("-----BEGIN HELLO-----\nZm9v\n-----END HELLO-----", pem); } [Fact] public static void TryWrite_Empty() { char[] buffer = new char[31]; Assert.True(PemEncoding.TryWrite(default, default, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); Assert.Equal("-----BEGIN -----\n-----END -----", pem); } [Fact] public static void Write_Empty() { char[] result = PemEncoding.Write(default, default); string pem = new string(result); Assert.Equal("-----BEGIN -----\n-----END -----", pem); } [Fact] public static void TryWrite_BufferTooSmall() { char[] buffer = new char[30]; Assert.False(PemEncoding.TryWrite(default, default, buffer, out _)); } [Fact] public static void TryWrite_ExactLineNoPadding() { char[] buffer = new char[1000]; ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 
2, 3, 4, 5, 6, 7 }; string label = "FANCY DATA"; Assert.True(PemEncoding.TryWrite(label, data, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); string expected = "-----BEGIN FANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "-----END FANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void Write_ExactLineNoPadding() { ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7 }; string label = "FANCY DATA"; char[] result = PemEncoding.Write(label, data); string pem = new string(result); string expected = "-----BEGIN FANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "-----END FANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_DoesNotWriteOutsideBounds() { Span<char> buffer = new char[1000]; buffer.Fill('!'); ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7 }; Span<char> write = buffer[10..]; string label = "FANCY DATA"; Assert.True(PemEncoding.TryWrite(label, data, write, out int charsWritten)); string pem = new string(buffer[..(charsWritten + 20)]); string expected = "!!!!!!!!!!-----BEGIN FANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "-----END FANCY DATA-----!!!!!!!!!!"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_WrapPadding() { char[] buffer = new char[1000]; ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; string label = "UNFANCY DATA"; Assert.True(PemEncoding.TryWrite(label, data, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); string expected = "-----BEGIN UNFANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "CAk=\n" + "-----END UNFANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void Write_WrapPadding() { ReadOnlySpan<byte> data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; string label = "UNFANCY DATA"; char[] result = PemEncoding.Write(label, data); string pem = new string(result); string expected = "-----BEGIN UNFANCY DATA-----\n" + "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAECAwQFBgcICQABAgMEBQYH\n" + "CAk=\n" + "-----END UNFANCY DATA-----"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_EcKey() { char[] buffer = new char[1000]; ReadOnlySpan<byte> data = new byte[] { 0x30, 0x74, 0x02, 0x01, 0x01, 0x04, 0x20, 0x20, 0x59, 0xef, 0xff, 0x13, 0xd4, 0x92, 0xf6, 0x6a, 0x6b, 0xcd, 0x07, 0xf4, 0x12, 0x86, 0x08, 0x6d, 0x81, 0x93, 0xed, 0x9c, 0xf0, 0xf8, 0x5b, 0xeb, 0x00, 0x70, 0x7c, 0x40, 0xfa, 0x12, 0x6c, 0xa0, 0x07, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xdf, 0x23, 0x42, 0xe5, 0xab, 0x3c, 0x25, 0x53, 0x79, 0x32, 0x31, 0x7d, 0xe6, 0x87, 0xcd, 0x4a, 0x04, 0x41, 0x55, 0x78, 0xdf, 0xd0, 0x22, 0xad, 0x60, 0x44, 0x96, 0x7c, 0xf9, 0xe6, 0xbd, 0x3d, 0xe7, 0xf9, 0xc3, 0x0c, 0x25, 0x40, 0x7d, 0x95, 0x42, 0x5f, 0x76, 0x41, 0x4d, 0x81, 0xa4, 0x81, 0xec, 0x99, 0x41, 0xfa, 0x4a, 0xd9, 0x55, 0x55, 0x7c, 0x4f, 
0xb1, 0xd9, 0x41, 0x75, 0x43, 0x44 }; string label = "EC PRIVATE KEY"; Assert.True(PemEncoding.TryWrite(label, data, buffer, out int charsWritten)); string pem = new string(buffer, 0, charsWritten); string expected = "-----BEGIN EC PRIVATE KEY-----\n" + "MHQCAQEEICBZ7/8T1JL2amvNB/QShghtgZPtnPD4W+sAcHxA+hJsoAcGBSuBBAAK\n" + "oUQDQgAE3yNC5as8JVN5MjF95ofNSgRBVXjf0CKtYESWfPnmvT3n+cMMJUB9lUJf\n" + "dkFNgaSB7JlB+krZVVV8T7HZQXVDRA==\n" + "-----END EC PRIVATE KEY-----"; Assert.Equal(expected, pem); } [Fact] public static void Write_EcKey() { ReadOnlySpan<byte> data = new byte[] { 0x30, 0x74, 0x02, 0x01, 0x01, 0x04, 0x20, 0x20, 0x59, 0xef, 0xff, 0x13, 0xd4, 0x92, 0xf6, 0x6a, 0x6b, 0xcd, 0x07, 0xf4, 0x12, 0x86, 0x08, 0x6d, 0x81, 0x93, 0xed, 0x9c, 0xf0, 0xf8, 0x5b, 0xeb, 0x00, 0x70, 0x7c, 0x40, 0xfa, 0x12, 0x6c, 0xa0, 0x07, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xdf, 0x23, 0x42, 0xe5, 0xab, 0x3c, 0x25, 0x53, 0x79, 0x32, 0x31, 0x7d, 0xe6, 0x87, 0xcd, 0x4a, 0x04, 0x41, 0x55, 0x78, 0xdf, 0xd0, 0x22, 0xad, 0x60, 0x44, 0x96, 0x7c, 0xf9, 0xe6, 0xbd, 0x3d, 0xe7, 0xf9, 0xc3, 0x0c, 0x25, 0x40, 0x7d, 0x95, 0x42, 0x5f, 0x76, 0x41, 0x4d, 0x81, 0xa4, 0x81, 0xec, 0x99, 0x41, 0xfa, 0x4a, 0xd9, 0x55, 0x55, 0x7c, 0x4f, 0xb1, 0xd9, 0x41, 0x75, 0x43, 0x44 }; string label = "EC PRIVATE KEY"; char[] result = PemEncoding.Write(label, data); string pem = new string(result); string expected = "-----BEGIN EC PRIVATE KEY-----\n" + "MHQCAQEEICBZ7/8T1JL2amvNB/QShghtgZPtnPD4W+sAcHxA+hJsoAcGBSuBBAAK\n" + "oUQDQgAE3yNC5as8JVN5MjF95ofNSgRBVXjf0CKtYESWfPnmvT3n+cMMJUB9lUJf\n" + "dkFNgaSB7JlB+krZVVV8T7HZQXVDRA==\n" + "-----END EC PRIVATE KEY-----"; Assert.Equal(expected, pem); } [Fact] public static void TryWrite_Throws_InvalidLabel() { char[] buffer = new char[50]; AssertExtensions.Throws<ArgumentException>("label", () => PemEncoding.TryWrite("\n", default, buffer, out _)); } [Fact] public static void Write_Throws_InvalidLabel() { AssertExtensions.Throws<ArgumentException>("label", () => PemEncoding.Write("\n", default)); } } }
-1
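The PemEncoding tests above pin down exact output shapes. For reference, the simplest Write call reproduces the first expectation, using the same label and bytes as the test:

    using System;
    using System.Security.Cryptography;
    using System.Text;

    class PemDemo
    {
        static void Main()
        {
            byte[] data = Encoding.ASCII.GetBytes("foo"); // 0x66 0x6F 0x6F, as in the test
            char[] pem = PemEncoding.Write("HELLO", data);
            Console.WriteLine(new string(pem));
            // -----BEGIN HELLO-----
            // Zm9v
            // -----END HELLO-----
        }
    }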
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/tests/JIT/Methodical/casts/array/castclass_ldlen.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ldlen.il .assembly extern mscorlib { } .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'castclass_ldlen' { } .assembly extern xunit.core {} .namespace JitTest { .class private auto ansi beforefieldinit Test extends [mscorlib]System.Object { .method private hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 4 .locals (class [mscorlib]System.Array) ldc.i4.6 newarr [mscorlib]System.Int32 castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass int32[] ldlen conv.i4 ldc.i4.6 beq.s continue_1 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_1: ldc.i4.6 newarr [mscorlib]System.Single castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass float32[] ldlen conv.i4 ldc.i4.6 beq.s continue_2 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_2: ldc.i4.6 newarr [mscorlib]System.Double castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass float64[] ldlen conv.i4 ldc.i4.6 beq.s continue_3 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_3: ldc.i4.6 newarr [mscorlib]System.IntPtr castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass native int[] ldlen conv.i4 ldc.i4.6 beq.s continue_4 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_4: ldc.i4 100 ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ldlen.il .assembly extern mscorlib { } .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'castclass_ldlen' { } .assembly extern xunit.core {} .namespace JitTest { .class private auto ansi beforefieldinit Test extends [mscorlib]System.Object { .method private hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 4 .locals (class [mscorlib]System.Array) ldc.i4.6 newarr [mscorlib]System.Int32 castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass int32[] ldlen conv.i4 ldc.i4.6 beq.s continue_1 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_1: ldc.i4.6 newarr [mscorlib]System.Single castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass float32[] ldlen conv.i4 ldc.i4.6 beq.s continue_2 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_2: ldc.i4.6 newarr [mscorlib]System.Double castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass float64[] ldlen conv.i4 ldc.i4.6 beq.s continue_3 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_3: ldc.i4.6 newarr [mscorlib]System.IntPtr castclass [mscorlib]System.Array stloc.0 ldloc.0 castclass native int[] ldlen conv.i4 ldc.i4.6 beq.s continue_4 newobj instance void [mscorlib]System.Exception::.ctor() throw continue_4: ldc.i4 100 ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } } }
-1
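The IL test above exercises castclass on array types followed by ldlen. For readers who don't speak IL, the C# equivalent of one of its blocks is:

    using System;

    class CastLdlen
    {
        static void Main()
        {
            Array boxed = (Array)new int[6]; // corresponds to castclass [mscorlib]System.Array
            int[] ints = (int[])boxed;       // corresponds to castclass int32[]
            Console.WriteLine(ints.Length);  // ldlen; prints 6
        }
    }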
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/UniversalCryptoEncryptor.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics; using System.Security.Cryptography; using Internal.Cryptography; namespace System.Security.Cryptography { // // A cross-platform ICryptoTransform implementation for encryption. // // - Implements the various padding algorithms (as we support padding algorithms that the underlying native apis don't.) // // - Parameterized by a BasicSymmetricCipher which encapsulates the algorithm, key, IV, chaining mode, direction of encryption // and the underlying native apis implementing the encryption. // internal sealed class UniversalCryptoEncryptor : UniversalCryptoTransform { public UniversalCryptoEncryptor(PaddingMode paddingMode, BasicSymmetricCipher basicSymmetricCipher) : base(paddingMode, basicSymmetricCipher) { } protected override int UncheckedTransformBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { return BasicSymmetricCipher.Transform(inputBuffer, outputBuffer); } protected override int UncheckedTransformFinalBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { // The only caller of this method is the array-allocating overload, outputBuffer is // always new memory, not a user-provided buffer. Debug.Assert(!inputBuffer.Overlaps(outputBuffer)); int padWritten = SymmetricPadding.PadBlock(inputBuffer, outputBuffer, PaddingSizeBytes, PaddingMode); int transformWritten = BasicSymmetricCipher.TransformFinal(outputBuffer.Slice(0, padWritten), outputBuffer); // After padding, we should have an even number of blocks, and the same applies // to the transform. Debug.Assert(padWritten == transformWritten); return transformWritten; } protected override byte[] UncheckedTransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) { int ciphertextLength = SymmetricPadding.GetCiphertextLength(inputCount, PaddingSizeBytes, PaddingMode); byte[] buffer = GC.AllocateUninitializedArray<byte>(ciphertextLength); int written = UncheckedTransformFinalBlock(inputBuffer.AsSpan(inputOffset, inputCount), buffer); Debug.Assert(written == buffer.Length); return buffer; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics; using System.Security.Cryptography; using Internal.Cryptography; namespace System.Security.Cryptography { // // A cross-platform ICryptoTransform implementation for encryption. // // - Implements the various padding algorithms (as we support padding algorithms that the underlying native apis don't.) // // - Parameterized by a BasicSymmetricCipher which encapsulates the algorithm, key, IV, chaining mode, direction of encryption // and the underlying native apis implementing the encryption. // internal sealed class UniversalCryptoEncryptor : UniversalCryptoTransform { public UniversalCryptoEncryptor(PaddingMode paddingMode, BasicSymmetricCipher basicSymmetricCipher) : base(paddingMode, basicSymmetricCipher) { } protected override int UncheckedTransformBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { return BasicSymmetricCipher.Transform(inputBuffer, outputBuffer); } protected override int UncheckedTransformFinalBlock(ReadOnlySpan<byte> inputBuffer, Span<byte> outputBuffer) { // The only caller of this method is the array-allocating overload, outputBuffer is // always new memory, not a user-provided buffer. Debug.Assert(!inputBuffer.Overlaps(outputBuffer)); int padWritten = SymmetricPadding.PadBlock(inputBuffer, outputBuffer, PaddingSizeBytes, PaddingMode); int transformWritten = BasicSymmetricCipher.TransformFinal(outputBuffer.Slice(0, padWritten), outputBuffer); // After padding, we should have an even number of blocks, and the same applies // to the transform. Debug.Assert(padWritten == transformWritten); return transformWritten; } protected override byte[] UncheckedTransformFinalBlock(byte[] inputBuffer, int inputOffset, int inputCount) { int ciphertextLength = SymmetricPadding.GetCiphertextLength(inputCount, PaddingSizeBytes, PaddingMode); byte[] buffer = GC.AllocateUninitializedArray<byte>(ciphertextLength); int written = UncheckedTransformFinalBlock(inputBuffer.AsSpan(inputOffset, inputCount), buffer); Debug.Assert(written == buffer.Length); return buffer; } } }
-1
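UncheckedTransformFinalBlock above sizes its output buffer with SymmetricPadding.GetCiphertextLength before padding and transforming in place. As a hedged sketch of the PKCS7 case only (the real helper also covers the other PaddingMode values such as None, Zeros, ANSIX923, and ISO10126; this standalone function is illustrative, not the runtime's implementation):

// PKCS7 rounds the plaintext up to the next whole block and always appends
// at least one padding byte, so block-aligned input gains a full extra block.
static int GetPkcs7CiphertextLength(int plaintextLength, int blockSizeBytes)
{
    int remainder = plaintextLength % blockSizeBytes;
    return plaintextLength - remainder + blockSizeBytes;
}

For a 16-byte block cipher this maps 15 to 16, 16 to 32, and 17 to 32, which is why padWritten always lands on a whole number of blocks in the assert above.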
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/tests/JIT/HardwareIntrinsics/General/Vector256_1/op_UnaryNegation.Double.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_UnaryNegationDouble() { var test = new VectorUnaryOpTest__op_UnaryNegationDouble(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorUnaryOpTest__op_UnaryNegationDouble { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Double[] inArray1, Double[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Double>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Double, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Double> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref testStruct._fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); return testStruct; } public void 
RunStructFldScenario(VectorUnaryOpTest__op_UnaryNegationDouble testClass) { var result = -_fld1; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static Double[] _data1 = new Double[Op1ElementCount]; private static Vector256<Double> _clsVar1; private Vector256<Double> _fld1; private DataTable _dataTable; static VectorUnaryOpTest__op_UnaryNegationDouble() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _clsVar1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); } public VectorUnaryOpTest__op_UnaryNegationDouble() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } _dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = -Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector256<Double>).GetMethod("op_UnaryNegation", new Type[] { typeof(Vector256<Double>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = -_clsVar1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); var result = -op1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorUnaryOpTest__op_UnaryNegationDouble(); var result = -test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = -_fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = -test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<Double> op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Double[] firstOp, Double[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (double)(0 - firstOp[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (double)(0 - firstOp[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.op_UnaryNegation<Double>(Vector256<Double>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_UnaryNegationDouble() { var test = new VectorUnaryOpTest__op_UnaryNegationDouble(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorUnaryOpTest__op_UnaryNegationDouble { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Double[] inArray1, Double[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Double>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Double, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Double> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref testStruct._fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); return testStruct; } public void 
RunStructFldScenario(VectorUnaryOpTest__op_UnaryNegationDouble testClass) { var result = -_fld1; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static Double[] _data1 = new Double[Op1ElementCount]; private static Vector256<Double> _clsVar1; private Vector256<Double> _fld1; private DataTable _dataTable; static VectorUnaryOpTest__op_UnaryNegationDouble() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _clsVar1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); } public VectorUnaryOpTest__op_UnaryNegationDouble() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } _dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = -Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector256<Double>).GetMethod("op_UnaryNegation", new Type[] { typeof(Vector256<Double>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = -_clsVar1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); var result = -op1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorUnaryOpTest__op_UnaryNegationDouble(); var result = -test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = -_fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = -test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<Double> op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Double[] firstOp, Double[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (double)(0 - firstOp[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (double)(0 - firstOp[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.op_UnaryNegation<Double>(Vector256<Double>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
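The generated test above validates Vector256<double> unary negation lane by lane against (double)(0 - firstOp[i]). A minimal usage sketch of the operator it exercises, assuming a runtime where Vector256<T> defines op_UnaryNegation (as in this .NET 7-era repository); the class name is illustrative:

using System;
using System.Runtime.Intrinsics;

class NegateVector256Sketch
{
    static void Main()
    {
        Vector256<double> v = Vector256.Create(1.0, -2.0, 3.0, -4.0);
        Vector256<double> r = -v; // op_UnaryNegation: each lane becomes 0 - v[i]
        for (int i = 0; i < Vector256<double>.Count; i++)
            Console.WriteLine(r.GetElement(i)); // prints -1, 2, -3, 4
    }
}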
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/ExpressionStringBuilder.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Collections.ObjectModel; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Reflection; using System.Runtime.CompilerServices; using System.Text; namespace System.Linq.Expressions { internal sealed class ExpressionStringBuilder : ExpressionVisitor { private readonly StringBuilder _out; // Associate every unique label or anonymous parameter in the tree with an integer. // Labels are displayed as UnnamedLabel_#; parameters are displayed as Param_#. private Dictionary<object, int>? _ids; private ExpressionStringBuilder() { _out = new StringBuilder(); } public override string ToString() { return _out.ToString(); } private int GetLabelId(LabelTarget label) => GetId(label); private int GetParamId(ParameterExpression p) => GetId(p); private int GetId(object o) { if (_ids == null) { _ids = new Dictionary<object, int>(); } int id; if (!_ids.TryGetValue(o, out id)) { id = _ids.Count; _ids.Add(o, id); } return id; } #region The printing code private void Out(string? s) { _out.Append(s); } private void Out(char c) { _out.Append(c); } #endregion #region Output an expression tree to a string /// <summary> /// Output a given expression tree to a string. /// </summary> internal static string ExpressionToString(Expression node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.Visit(node); return esb.ToString(); } internal static string CatchBlockToString(CatchBlock node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitCatchBlock(node); return esb.ToString(); } internal static string SwitchCaseToString(SwitchCase node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitSwitchCase(node); return esb.ToString(); } /// <summary> /// Output a given member binding to a string. /// </summary> internal static string MemberBindingToString(MemberBinding node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitMemberBinding(node); return esb.ToString(); } /// <summary> /// Output a given ElementInit to a string. /// </summary> internal static string ElementInitBindingToString(ElementInit node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitElementInit(node); return esb.ToString(); } private void VisitExpressions<T>(char open, ReadOnlyCollection<T> expressions, char close) where T : Expression { VisitExpressions(open, expressions, close, ", "); } private void VisitExpressions<T>(char open, ReadOnlyCollection<T> expressions, char close, string seperator) where T : Expression { Out(open); if (expressions != null) { bool isFirst = true; foreach (T e in expressions) { if (isFirst) { isFirst = false; } else { Out(seperator); } Visit(e); } } Out(close); } protected internal override Expression VisitBinary(BinaryExpression node) { if (node.NodeType == ExpressionType.ArrayIndex) { Visit(node.Left); Out('['); Visit(node.Right); Out(']'); } else { string op; switch (node.NodeType) { // AndAlso and OrElse were unintentionally changed in // CLR 4. We changed them to "AndAlso" and "OrElse" to // be 3.5 compatible, but it turns out 3.5 shipped with // "&&" and "||". Oops. 
case ExpressionType.AndAlso: op = "AndAlso"; break; case ExpressionType.OrElse: op = "OrElse"; break; case ExpressionType.Assign: op = "="; break; case ExpressionType.Equal: op = "=="; break; case ExpressionType.NotEqual: op = "!="; break; case ExpressionType.GreaterThan: op = ">"; break; case ExpressionType.LessThan: op = "<"; break; case ExpressionType.GreaterThanOrEqual: op = ">="; break; case ExpressionType.LessThanOrEqual: op = "<="; break; case ExpressionType.Add: case ExpressionType.AddChecked: op = "+"; break; case ExpressionType.AddAssign: case ExpressionType.AddAssignChecked: op = "+="; break; case ExpressionType.Subtract: case ExpressionType.SubtractChecked: op = "-"; break; case ExpressionType.SubtractAssign: case ExpressionType.SubtractAssignChecked: op = "-="; break; case ExpressionType.Divide: op = "/"; break; case ExpressionType.DivideAssign: op = "/="; break; case ExpressionType.Modulo: op = "%"; break; case ExpressionType.ModuloAssign: op = "%="; break; case ExpressionType.Multiply: case ExpressionType.MultiplyChecked: op = "*"; break; case ExpressionType.MultiplyAssign: case ExpressionType.MultiplyAssignChecked: op = "*="; break; case ExpressionType.LeftShift: op = "<<"; break; case ExpressionType.LeftShiftAssign: op = "<<="; break; case ExpressionType.RightShift: op = ">>"; break; case ExpressionType.RightShiftAssign: op = ">>="; break; case ExpressionType.And: op = IsBool(node) ? "And" : "&"; break; case ExpressionType.AndAssign: op = IsBool(node) ? "&&=" : "&="; break; case ExpressionType.Or: op = IsBool(node) ? "Or" : "|"; break; case ExpressionType.OrAssign: op = IsBool(node) ? "||=" : "|="; break; case ExpressionType.ExclusiveOr: op = "^"; break; case ExpressionType.ExclusiveOrAssign: op = "^="; break; case ExpressionType.Power: op = "**"; break; // This was changed in .NET Core from ^ to ** case ExpressionType.PowerAssign: op = "**="; break; case ExpressionType.Coalesce: op = "??"; break; default: throw new InvalidOperationException(); } Out('('); Visit(node.Left); Out(' '); Out(op); Out(' '); Visit(node.Right); Out(')'); } return node; } protected internal override Expression VisitParameter(ParameterExpression node) { if (node.IsByRef) { Out("ref "); } string? name = node.Name; if (string.IsNullOrEmpty(name)) { Out("Param_" + GetParamId(node)); } else { Out(name); } return node; } protected internal override Expression VisitLambda<T>(Expression<T> node) { if (node.ParameterCount == 1) { // p => body Visit(node.GetParameter(0)); } else { // (p1, p2, ..., pn) => body Out('('); string sep = ", "; for (int i = 0, n = node.ParameterCount; i < n; i++) { if (i > 0) { Out(sep); } Visit(node.GetParameter(i)); } Out(')'); } Out(" => "); Visit(node.Body); return node; } protected internal override Expression VisitListInit(ListInitExpression node) { Visit(node.NewExpression); Out(" {"); for (int i = 0, n = node.Initializers.Count; i < n; i++) { if (i > 0) { Out(", "); } VisitElementInit(node.Initializers[i]); } Out('}'); return node; } protected internal override Expression VisitConditional(ConditionalExpression node) { Out("IIF("); Visit(node.Test); Out(", "); Visit(node.IfTrue); Out(", "); Visit(node.IfFalse); Out(')'); return node; } protected internal override Expression VisitConstant(ConstantExpression node) { if (node.Value != null) { string? 
sValue = node.Value.ToString(); if (node.Value is string) { Out('\"'); Out(sValue); Out('\"'); } else if (sValue == node.Value.GetType().ToString()) { Out("value("); Out(sValue); Out(')'); } else { Out(sValue); } } else { Out("null"); } return node; } protected internal override Expression VisitDebugInfo(DebugInfoExpression node) { Out($"<DebugInfo({node.Document.FileName}: {node.StartLine}, {node.StartColumn}, {node.EndLine}, {node.EndColumn})>"); return node; } protected internal override Expression VisitRuntimeVariables(RuntimeVariablesExpression node) { VisitExpressions('(', node.Variables, ')'); return node; } // Prints ".instanceField" or "declaringType.staticField" private void OutMember(Expression? instance, MemberInfo member) { if (instance != null) { Visit(instance); } else { // For static members, include the type name Out(member.DeclaringType!.Name); } Out('.'); Out(member.Name); } protected internal override Expression VisitMember(MemberExpression node) { OutMember(node.Expression, node.Member); return node; } protected internal override Expression VisitMemberInit(MemberInitExpression node) { if (node.NewExpression.ArgumentCount == 0 && node.NewExpression.Type.Name.Contains('<')) { // anonymous type constructor Out("new"); } else { Visit(node.NewExpression); } Out(" {"); for (int i = 0, n = node.Bindings.Count; i < n; i++) { MemberBinding b = node.Bindings[i]; if (i > 0) { Out(", "); } VisitMemberBinding(b); } Out('}'); return node; } protected override MemberAssignment VisitMemberAssignment(MemberAssignment assignment) { Out(assignment.Member.Name); Out(" = "); Visit(assignment.Expression); return assignment; } protected override MemberListBinding VisitMemberListBinding(MemberListBinding binding) { Out(binding.Member.Name); Out(" = {"); for (int i = 0, n = binding.Initializers.Count; i < n; i++) { if (i > 0) { Out(", "); } VisitElementInit(binding.Initializers[i]); } Out('}'); return binding; } protected override MemberMemberBinding VisitMemberMemberBinding(MemberMemberBinding binding) { Out(binding.Member.Name); Out(" = {"); for (int i = 0, n = binding.Bindings.Count; i < n; i++) { if (i > 0) { Out(", "); } VisitMemberBinding(binding.Bindings[i]); } Out('}'); return binding; } protected override ElementInit VisitElementInit(ElementInit initializer) { Out(initializer.AddMethod.ToString()); string sep = ", "; Out('('); for (int i = 0, n = initializer.ArgumentCount; i < n; i++) { if (i > 0) { Out(sep); } Visit(initializer.GetArgument(i)); } Out(')'); return initializer; } protected internal override Expression VisitInvocation(InvocationExpression node) { Out("Invoke("); Visit(node.Expression); string sep = ", "; for (int i = 0, n = node.ArgumentCount; i < n; i++) { Out(sep); Visit(node.GetArgument(i)); } Out(')'); return node; } protected internal override Expression VisitMethodCall(MethodCallExpression node) { int start = 0; Expression? 
ob = node.Object; if (node.Method.GetCustomAttribute(typeof(ExtensionAttribute)) != null) { start = 1; ob = node.GetArgument(0); } if (ob != null) { Visit(ob); Out('.'); } Out(node.Method.Name); Out('('); for (int i = start, n = node.ArgumentCount; i < n; i++) { if (i > start) Out(", "); Visit(node.GetArgument(i)); } Out(')'); return node; } protected internal override Expression VisitNewArray(NewArrayExpression node) { switch (node.NodeType) { case ExpressionType.NewArrayBounds: // new MyType[](expr1, expr2) Out("new "); Out(node.Type.ToString()); VisitExpressions('(', node.Expressions, ')'); break; case ExpressionType.NewArrayInit: // new [] {expr1, expr2} Out("new [] "); VisitExpressions('{', node.Expressions, '}'); break; } return node; } protected internal override Expression VisitNew(NewExpression node) { Out("new "); Out(node.Type.Name); Out('('); ReadOnlyCollection<MemberInfo>? members = node.Members; for (int i = 0; i < node.ArgumentCount; i++) { if (i > 0) { Out(", "); } if (members != null) { string name = members[i].Name; Out(name); Out(" = "); } Visit(node.GetArgument(i)); } Out(')'); return node; } protected internal override Expression VisitTypeBinary(TypeBinaryExpression node) { Out('('); Visit(node.Expression); switch (node.NodeType) { case ExpressionType.TypeIs: Out(" Is "); break; case ExpressionType.TypeEqual: Out(" TypeEqual "); break; } Out(node.TypeOperand.Name); Out(')'); return node; } protected internal override Expression VisitUnary(UnaryExpression node) { switch (node.NodeType) { case ExpressionType.Negate: case ExpressionType.NegateChecked: Out('-'); break; case ExpressionType.Not: Out("Not("); break; case ExpressionType.IsFalse: Out("IsFalse("); break; case ExpressionType.IsTrue: Out("IsTrue("); break; case ExpressionType.OnesComplement: Out("~("); break; case ExpressionType.ArrayLength: Out("ArrayLength("); break; case ExpressionType.Convert: Out("Convert("); break; case ExpressionType.ConvertChecked: Out("ConvertChecked("); break; case ExpressionType.Throw: Out("throw("); break; case ExpressionType.TypeAs: Out('('); break; case ExpressionType.UnaryPlus: Out('+'); break; case ExpressionType.Unbox: Out("Unbox("); break; case ExpressionType.Increment: Out("Increment("); break; case ExpressionType.Decrement: Out("Decrement("); break; case ExpressionType.PreIncrementAssign: Out("++"); break; case ExpressionType.PreDecrementAssign: Out("--"); break; case ExpressionType.Quote: case ExpressionType.PostIncrementAssign: case ExpressionType.PostDecrementAssign: break; default: throw new InvalidOperationException(); } Visit(node.Operand); switch (node.NodeType) { case ExpressionType.Negate: case ExpressionType.NegateChecked: case ExpressionType.UnaryPlus: case ExpressionType.PreDecrementAssign: case ExpressionType.PreIncrementAssign: case ExpressionType.Quote: break; case ExpressionType.TypeAs: Out(" As "); Out(node.Type.Name); Out(')'); break; case ExpressionType.Convert: case ExpressionType.ConvertChecked: Out(", "); Out(node.Type.Name); Out(')'); break; // These were changed in .NET Core to add the type name case ExpressionType.PostIncrementAssign: Out("++"); break; case ExpressionType.PostDecrementAssign: Out("--"); break; default: Out(')'); break; } return node; } protected internal override Expression VisitBlock(BlockExpression node) { Out('{'); foreach (ParameterExpression v in node.Variables) { Out("var "); Visit(v); Out(';'); } Out(" ... 
}"); return node; } protected internal override Expression VisitDefault(DefaultExpression node) { Out("default("); Out(node.Type.Name); Out(')'); return node; } protected internal override Expression VisitLabel(LabelExpression node) { Out("{ ... } "); DumpLabel(node.Target); Out(':'); return node; } protected internal override Expression VisitGoto(GotoExpression node) { string op = node.Kind switch { GotoExpressionKind.Goto => "goto", GotoExpressionKind.Break => "break", GotoExpressionKind.Continue => "continue", GotoExpressionKind.Return => "return", _ => throw new InvalidOperationException(), }; Out(op); Out(' '); DumpLabel(node.Target); if (node.Value != null) { Out(" ("); Visit(node.Value); Out(")"); } return node; } protected internal override Expression VisitLoop(LoopExpression node) { Out("loop { ... }"); return node; } protected override SwitchCase VisitSwitchCase(SwitchCase node) { Out("case "); VisitExpressions('(', node.TestValues, ')'); Out(": ..."); return node; } protected internal override Expression VisitSwitch(SwitchExpression node) { Out("switch "); Out('('); Visit(node.SwitchValue); Out(") { ... }"); return node; } protected override CatchBlock VisitCatchBlock(CatchBlock node) { Out("catch ("); Out(node.Test.Name); if (!string.IsNullOrEmpty(node.Variable?.Name)) { Out(' '); Out(node.Variable.Name); } Out(") { ... }"); return node; } protected internal override Expression VisitTry(TryExpression node) { Out("try { ... }"); return node; } protected internal override Expression VisitIndex(IndexExpression node) { if (node.Object != null) { Visit(node.Object); } else { Debug.Assert(node.Indexer != null); Out(node.Indexer.DeclaringType!.Name); } if (node.Indexer != null) { Out('.'); Out(node.Indexer.Name); } Out('['); for (int i = 0, n = node.ArgumentCount; i < n; i++) { if (i > 0) Out(", "); Visit(node.GetArgument(i)); } Out(']'); return node; } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2075:UnrecognizedReflectionPattern", Justification = "The 'ToString' method cannot be trimmed on any Expression type because we are calling Expression.ToString() in this method.")] protected internal override Expression VisitExtension(Expression node) { // Prefer an overridden ToString, if available. MethodInfo toString = node.GetType().GetMethod("ToString", Type.EmptyTypes)!; if (toString.DeclaringType != typeof(Expression) && !toString.IsStatic) { Out(node.ToString()); return node; } Out('['); // For 3.5 subclasses, print the NodeType. // For Extension nodes, print the class name. Out(node.NodeType == ExpressionType.Extension ? node.GetType().FullName : node.NodeType.ToString()); Out(']'); return node; } private void DumpLabel(LabelTarget target) { if (!string.IsNullOrEmpty(target.Name)) { Out(target.Name); } else { int labelId = GetLabelId(target); Out("UnnamedLabel_" + labelId); } } private static bool IsBool(Expression node) { return node.Type == typeof(bool) || node.Type == typeof(bool?); } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Collections.ObjectModel; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Reflection; using System.Runtime.CompilerServices; using System.Text; namespace System.Linq.Expressions { internal sealed class ExpressionStringBuilder : ExpressionVisitor { private readonly StringBuilder _out; // Associate every unique label or anonymous parameter in the tree with an integer. // Labels are displayed as UnnamedLabel_#; parameters are displayed as Param_#. private Dictionary<object, int>? _ids; private ExpressionStringBuilder() { _out = new StringBuilder(); } public override string ToString() { return _out.ToString(); } private int GetLabelId(LabelTarget label) => GetId(label); private int GetParamId(ParameterExpression p) => GetId(p); private int GetId(object o) { if (_ids == null) { _ids = new Dictionary<object, int>(); } int id; if (!_ids.TryGetValue(o, out id)) { id = _ids.Count; _ids.Add(o, id); } return id; } #region The printing code private void Out(string? s) { _out.Append(s); } private void Out(char c) { _out.Append(c); } #endregion #region Output an expression tree to a string /// <summary> /// Output a given expression tree to a string. /// </summary> internal static string ExpressionToString(Expression node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.Visit(node); return esb.ToString(); } internal static string CatchBlockToString(CatchBlock node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitCatchBlock(node); return esb.ToString(); } internal static string SwitchCaseToString(SwitchCase node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitSwitchCase(node); return esb.ToString(); } /// <summary> /// Output a given member binding to a string. /// </summary> internal static string MemberBindingToString(MemberBinding node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitMemberBinding(node); return esb.ToString(); } /// <summary> /// Output a given ElementInit to a string. /// </summary> internal static string ElementInitBindingToString(ElementInit node) { Debug.Assert(node != null); ExpressionStringBuilder esb = new ExpressionStringBuilder(); esb.VisitElementInit(node); return esb.ToString(); } private void VisitExpressions<T>(char open, ReadOnlyCollection<T> expressions, char close) where T : Expression { VisitExpressions(open, expressions, close, ", "); } private void VisitExpressions<T>(char open, ReadOnlyCollection<T> expressions, char close, string seperator) where T : Expression { Out(open); if (expressions != null) { bool isFirst = true; foreach (T e in expressions) { if (isFirst) { isFirst = false; } else { Out(seperator); } Visit(e); } } Out(close); } protected internal override Expression VisitBinary(BinaryExpression node) { if (node.NodeType == ExpressionType.ArrayIndex) { Visit(node.Left); Out('['); Visit(node.Right); Out(']'); } else { string op; switch (node.NodeType) { // AndAlso and OrElse were unintentionally changed in // CLR 4. We changed them to "AndAlso" and "OrElse" to // be 3.5 compatible, but it turns out 3.5 shipped with // "&&" and "||". Oops. 
case ExpressionType.AndAlso: op = "AndAlso"; break; case ExpressionType.OrElse: op = "OrElse"; break; case ExpressionType.Assign: op = "="; break; case ExpressionType.Equal: op = "=="; break; case ExpressionType.NotEqual: op = "!="; break; case ExpressionType.GreaterThan: op = ">"; break; case ExpressionType.LessThan: op = "<"; break; case ExpressionType.GreaterThanOrEqual: op = ">="; break; case ExpressionType.LessThanOrEqual: op = "<="; break; case ExpressionType.Add: case ExpressionType.AddChecked: op = "+"; break; case ExpressionType.AddAssign: case ExpressionType.AddAssignChecked: op = "+="; break; case ExpressionType.Subtract: case ExpressionType.SubtractChecked: op = "-"; break; case ExpressionType.SubtractAssign: case ExpressionType.SubtractAssignChecked: op = "-="; break; case ExpressionType.Divide: op = "/"; break; case ExpressionType.DivideAssign: op = "/="; break; case ExpressionType.Modulo: op = "%"; break; case ExpressionType.ModuloAssign: op = "%="; break; case ExpressionType.Multiply: case ExpressionType.MultiplyChecked: op = "*"; break; case ExpressionType.MultiplyAssign: case ExpressionType.MultiplyAssignChecked: op = "*="; break; case ExpressionType.LeftShift: op = "<<"; break; case ExpressionType.LeftShiftAssign: op = "<<="; break; case ExpressionType.RightShift: op = ">>"; break; case ExpressionType.RightShiftAssign: op = ">>="; break; case ExpressionType.And: op = IsBool(node) ? "And" : "&"; break; case ExpressionType.AndAssign: op = IsBool(node) ? "&&=" : "&="; break; case ExpressionType.Or: op = IsBool(node) ? "Or" : "|"; break; case ExpressionType.OrAssign: op = IsBool(node) ? "||=" : "|="; break; case ExpressionType.ExclusiveOr: op = "^"; break; case ExpressionType.ExclusiveOrAssign: op = "^="; break; case ExpressionType.Power: op = "**"; break; // This was changed in .NET Core from ^ to ** case ExpressionType.PowerAssign: op = "**="; break; case ExpressionType.Coalesce: op = "??"; break; default: throw new InvalidOperationException(); } Out('('); Visit(node.Left); Out(' '); Out(op); Out(' '); Visit(node.Right); Out(')'); } return node; } protected internal override Expression VisitParameter(ParameterExpression node) { if (node.IsByRef) { Out("ref "); } string? name = node.Name; if (string.IsNullOrEmpty(name)) { Out("Param_" + GetParamId(node)); } else { Out(name); } return node; } protected internal override Expression VisitLambda<T>(Expression<T> node) { if (node.ParameterCount == 1) { // p => body Visit(node.GetParameter(0)); } else { // (p1, p2, ..., pn) => body Out('('); string sep = ", "; for (int i = 0, n = node.ParameterCount; i < n; i++) { if (i > 0) { Out(sep); } Visit(node.GetParameter(i)); } Out(')'); } Out(" => "); Visit(node.Body); return node; } protected internal override Expression VisitListInit(ListInitExpression node) { Visit(node.NewExpression); Out(" {"); for (int i = 0, n = node.Initializers.Count; i < n; i++) { if (i > 0) { Out(", "); } VisitElementInit(node.Initializers[i]); } Out('}'); return node; } protected internal override Expression VisitConditional(ConditionalExpression node) { Out("IIF("); Visit(node.Test); Out(", "); Visit(node.IfTrue); Out(", "); Visit(node.IfFalse); Out(')'); return node; } protected internal override Expression VisitConstant(ConstantExpression node) { if (node.Value != null) { string? 
sValue = node.Value.ToString(); if (node.Value is string) { Out('\"'); Out(sValue); Out('\"'); } else if (sValue == node.Value.GetType().ToString()) { Out("value("); Out(sValue); Out(')'); } else { Out(sValue); } } else { Out("null"); } return node; } protected internal override Expression VisitDebugInfo(DebugInfoExpression node) { Out($"<DebugInfo({node.Document.FileName}: {node.StartLine}, {node.StartColumn}, {node.EndLine}, {node.EndColumn})>"); return node; } protected internal override Expression VisitRuntimeVariables(RuntimeVariablesExpression node) { VisitExpressions('(', node.Variables, ')'); return node; } // Prints ".instanceField" or "declaringType.staticField" private void OutMember(Expression? instance, MemberInfo member) { if (instance != null) { Visit(instance); } else { // For static members, include the type name Out(member.DeclaringType!.Name); } Out('.'); Out(member.Name); } protected internal override Expression VisitMember(MemberExpression node) { OutMember(node.Expression, node.Member); return node; } protected internal override Expression VisitMemberInit(MemberInitExpression node) { if (node.NewExpression.ArgumentCount == 0 && node.NewExpression.Type.Name.Contains('<')) { // anonymous type constructor Out("new"); } else { Visit(node.NewExpression); } Out(" {"); for (int i = 0, n = node.Bindings.Count; i < n; i++) { MemberBinding b = node.Bindings[i]; if (i > 0) { Out(", "); } VisitMemberBinding(b); } Out('}'); return node; } protected override MemberAssignment VisitMemberAssignment(MemberAssignment assignment) { Out(assignment.Member.Name); Out(" = "); Visit(assignment.Expression); return assignment; } protected override MemberListBinding VisitMemberListBinding(MemberListBinding binding) { Out(binding.Member.Name); Out(" = {"); for (int i = 0, n = binding.Initializers.Count; i < n; i++) { if (i > 0) { Out(", "); } VisitElementInit(binding.Initializers[i]); } Out('}'); return binding; } protected override MemberMemberBinding VisitMemberMemberBinding(MemberMemberBinding binding) { Out(binding.Member.Name); Out(" = {"); for (int i = 0, n = binding.Bindings.Count; i < n; i++) { if (i > 0) { Out(", "); } VisitMemberBinding(binding.Bindings[i]); } Out('}'); return binding; } protected override ElementInit VisitElementInit(ElementInit initializer) { Out(initializer.AddMethod.ToString()); string sep = ", "; Out('('); for (int i = 0, n = initializer.ArgumentCount; i < n; i++) { if (i > 0) { Out(sep); } Visit(initializer.GetArgument(i)); } Out(')'); return initializer; } protected internal override Expression VisitInvocation(InvocationExpression node) { Out("Invoke("); Visit(node.Expression); string sep = ", "; for (int i = 0, n = node.ArgumentCount; i < n; i++) { Out(sep); Visit(node.GetArgument(i)); } Out(')'); return node; } protected internal override Expression VisitMethodCall(MethodCallExpression node) { int start = 0; Expression? 
ob = node.Object; if (node.Method.GetCustomAttribute(typeof(ExtensionAttribute)) != null) { start = 1; ob = node.GetArgument(0); } if (ob != null) { Visit(ob); Out('.'); } Out(node.Method.Name); Out('('); for (int i = start, n = node.ArgumentCount; i < n; i++) { if (i > start) Out(", "); Visit(node.GetArgument(i)); } Out(')'); return node; } protected internal override Expression VisitNewArray(NewArrayExpression node) { switch (node.NodeType) { case ExpressionType.NewArrayBounds: // new MyType[](expr1, expr2) Out("new "); Out(node.Type.ToString()); VisitExpressions('(', node.Expressions, ')'); break; case ExpressionType.NewArrayInit: // new [] {expr1, expr2} Out("new [] "); VisitExpressions('{', node.Expressions, '}'); break; } return node; } protected internal override Expression VisitNew(NewExpression node) { Out("new "); Out(node.Type.Name); Out('('); ReadOnlyCollection<MemberInfo>? members = node.Members; for (int i = 0; i < node.ArgumentCount; i++) { if (i > 0) { Out(", "); } if (members != null) { string name = members[i].Name; Out(name); Out(" = "); } Visit(node.GetArgument(i)); } Out(')'); return node; } protected internal override Expression VisitTypeBinary(TypeBinaryExpression node) { Out('('); Visit(node.Expression); switch (node.NodeType) { case ExpressionType.TypeIs: Out(" Is "); break; case ExpressionType.TypeEqual: Out(" TypeEqual "); break; } Out(node.TypeOperand.Name); Out(')'); return node; } protected internal override Expression VisitUnary(UnaryExpression node) { switch (node.NodeType) { case ExpressionType.Negate: case ExpressionType.NegateChecked: Out('-'); break; case ExpressionType.Not: Out("Not("); break; case ExpressionType.IsFalse: Out("IsFalse("); break; case ExpressionType.IsTrue: Out("IsTrue("); break; case ExpressionType.OnesComplement: Out("~("); break; case ExpressionType.ArrayLength: Out("ArrayLength("); break; case ExpressionType.Convert: Out("Convert("); break; case ExpressionType.ConvertChecked: Out("ConvertChecked("); break; case ExpressionType.Throw: Out("throw("); break; case ExpressionType.TypeAs: Out('('); break; case ExpressionType.UnaryPlus: Out('+'); break; case ExpressionType.Unbox: Out("Unbox("); break; case ExpressionType.Increment: Out("Increment("); break; case ExpressionType.Decrement: Out("Decrement("); break; case ExpressionType.PreIncrementAssign: Out("++"); break; case ExpressionType.PreDecrementAssign: Out("--"); break; case ExpressionType.Quote: case ExpressionType.PostIncrementAssign: case ExpressionType.PostDecrementAssign: break; default: throw new InvalidOperationException(); } Visit(node.Operand); switch (node.NodeType) { case ExpressionType.Negate: case ExpressionType.NegateChecked: case ExpressionType.UnaryPlus: case ExpressionType.PreDecrementAssign: case ExpressionType.PreIncrementAssign: case ExpressionType.Quote: break; case ExpressionType.TypeAs: Out(" As "); Out(node.Type.Name); Out(')'); break; case ExpressionType.Convert: case ExpressionType.ConvertChecked: Out(", "); Out(node.Type.Name); Out(')'); break; // These were changed in .NET Core to add the type name case ExpressionType.PostIncrementAssign: Out("++"); break; case ExpressionType.PostDecrementAssign: Out("--"); break; default: Out(')'); break; } return node; } protected internal override Expression VisitBlock(BlockExpression node) { Out('{'); foreach (ParameterExpression v in node.Variables) { Out("var "); Visit(v); Out(';'); } Out(" ... 
}"); return node; } protected internal override Expression VisitDefault(DefaultExpression node) { Out("default("); Out(node.Type.Name); Out(')'); return node; } protected internal override Expression VisitLabel(LabelExpression node) { Out("{ ... } "); DumpLabel(node.Target); Out(':'); return node; } protected internal override Expression VisitGoto(GotoExpression node) { string op = node.Kind switch { GotoExpressionKind.Goto => "goto", GotoExpressionKind.Break => "break", GotoExpressionKind.Continue => "continue", GotoExpressionKind.Return => "return", _ => throw new InvalidOperationException(), }; Out(op); Out(' '); DumpLabel(node.Target); if (node.Value != null) { Out(" ("); Visit(node.Value); Out(")"); } return node; } protected internal override Expression VisitLoop(LoopExpression node) { Out("loop { ... }"); return node; } protected override SwitchCase VisitSwitchCase(SwitchCase node) { Out("case "); VisitExpressions('(', node.TestValues, ')'); Out(": ..."); return node; } protected internal override Expression VisitSwitch(SwitchExpression node) { Out("switch "); Out('('); Visit(node.SwitchValue); Out(") { ... }"); return node; } protected override CatchBlock VisitCatchBlock(CatchBlock node) { Out("catch ("); Out(node.Test.Name); if (!string.IsNullOrEmpty(node.Variable?.Name)) { Out(' '); Out(node.Variable.Name); } Out(") { ... }"); return node; } protected internal override Expression VisitTry(TryExpression node) { Out("try { ... }"); return node; } protected internal override Expression VisitIndex(IndexExpression node) { if (node.Object != null) { Visit(node.Object); } else { Debug.Assert(node.Indexer != null); Out(node.Indexer.DeclaringType!.Name); } if (node.Indexer != null) { Out('.'); Out(node.Indexer.Name); } Out('['); for (int i = 0, n = node.ArgumentCount; i < n; i++) { if (i > 0) Out(", "); Visit(node.GetArgument(i)); } Out(']'); return node; } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2075:UnrecognizedReflectionPattern", Justification = "The 'ToString' method cannot be trimmed on any Expression type because we are calling Expression.ToString() in this method.")] protected internal override Expression VisitExtension(Expression node) { // Prefer an overridden ToString, if available. MethodInfo toString = node.GetType().GetMethod("ToString", Type.EmptyTypes)!; if (toString.DeclaringType != typeof(Expression) && !toString.IsStatic) { Out(node.ToString()); return node; } Out('['); // For 3.5 subclasses, print the NodeType. // For Extension nodes, print the class name. Out(node.NodeType == ExpressionType.Extension ? node.GetType().FullName : node.NodeType.ToString()); Out(']'); return node; } private void DumpLabel(LabelTarget target) { if (!string.IsNullOrEmpty(target.Name)) { Out(target.Name); } else { int labelId = GetLabelId(target); Out("UnnamedLabel_" + labelId); } } private static bool IsBool(Expression node) { return node.Type == typeof(bool) || node.Type == typeof(bool?); } #endregion } }
-1
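ExpressionStringBuilder above is what backs Expression.ToString(): VisitLambda prints the parameter list and VisitBinary wraps every binary node in parentheses. A small sketch of the resulting strings (the class name is illustrative):

using System;
using System.Linq.Expressions;

class ExpressionToStringSketch
{
    static void Main()
    {
        Expression<Func<int, int, bool>> e = (x, y) => x + y > 10;
        // The nested binary nodes each get their own parentheses.
        Console.WriteLine(e); // (x, y) => ((x + y) > 10)
    }
}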
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32, the assigned offsets were wrong in several cases involving alignment and when we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/libraries/System.Reflection.Metadata/tests/Utilities/BlobReaderTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection.Internal; using System.Text; using Xunit; namespace System.Reflection.Metadata.Tests { public class BlobReaderTests { [Fact] public unsafe void Properties() { byte[] buffer = new byte[] { 0, 1, 0, 2, 5, 6 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(bufferPtr, 4); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr); Assert.Equal(0, reader.Offset); Assert.Equal(4, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Equal(0, reader.ReadByte()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 1); Assert.Equal(1, reader.Offset); Assert.Equal(3, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Equal(1, reader.ReadInt16()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 3); Assert.Equal(3, reader.Offset); Assert.Equal(1, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Throws<BadImageFormatException>(() => reader.ReadInt16()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 3); Assert.Equal(3, reader.Offset); Assert.Equal(1, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Equal(2, reader.ReadByte()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.Equal(4, reader.Length); } } [Fact] public unsafe void Offset() { byte[] buffer = new byte[] { 0, 1, 0, 2, 5, 6 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(bufferPtr, 4); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal(0, reader.Offset); Assert.Equal(4, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr); reader.Offset = 3; Assert.Equal(3, reader.Offset); Assert.Equal(1, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 3); reader.Offset = 1; Assert.Equal(1, reader.Offset); Assert.Equal(3, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 1); Assert.Equal(1, reader.ReadByte()); Assert.Equal(2, reader.Offset); Assert.Equal(2, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 2); reader.Offset = 4; Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Throws<BadImageFormatException>(() => reader.Offset = 5); Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Throws<BadImageFormatException>(() => reader.Offset = -1); Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Throws<BadImageFormatException>(() => reader.Offset = int.MaxValue); Assert.Throws<BadImageFormatException>(() => reader.Offset = int.MinValue); } } [Fact] public unsafe void PublicBlobReaderCtorValidatesArgs() { byte* bufferPtrForLambda; byte[] buffer = new byte[4] { 0, 1, 0, 2 }; fixed (byte* bufferPtr = buffer) { bufferPtrForLambda = bufferPtr; Assert.Throws<ArgumentOutOfRangeException>(() => new BlobReader(bufferPtrForLambda, -1)); } Assert.Throws<ArgumentNullException>(() => new BlobReader(null, 1)); Assert.Equal(0, new BlobReader(null, 0).Length); // this is valid 
Assert.Throws<BadImageFormatException>(() => new BlobReader(null, 0).ReadByte()); // but can't read anything non-empty from it... Assert.Same(string.Empty, new BlobReader(null, 0).ReadUtf8NullTerminated()); // can read empty string. } [Fact] public unsafe void ReadBoolean1() { byte[] buffer = new byte[] { 1, 0xff, 0, 2 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(new MemoryBlock(bufferPtr, buffer.Length)); Assert.True(reader.ReadBoolean()); Assert.True(reader.ReadBoolean()); Assert.False(reader.ReadBoolean()); Assert.True(reader.ReadBoolean()); } } [Fact] public unsafe void ReadFromMemoryReader() { byte[] buffer = new byte[4] { 0, 1, 0, 2 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(new MemoryBlock(bufferPtr, buffer.Length)); Assert.Equal(0, reader.Offset); Assert.Throws<BadImageFormatException>(() => reader.ReadUInt64()); Assert.Equal(0, reader.Offset); reader.Offset = 1; Assert.Throws<BadImageFormatException>(() => reader.ReadDouble()); Assert.Equal(1, reader.Offset); reader.Offset = 2; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt32()); Assert.Equal((ushort)0x0200, reader.ReadUInt16()); Assert.Equal(4, reader.Offset); reader.Offset = 2; Assert.Throws<BadImageFormatException>(() => reader.ReadSingle()); Assert.Equal(2, reader.Offset); reader.Offset = 0; Assert.Equal(9.404242E-38F, reader.ReadSingle()); Assert.Equal(4, reader.Offset); reader.Offset = 3; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt16()); Assert.Equal((byte)0x02, reader.ReadByte()); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Equal("\u0000\u0001\u0000\u0002", reader.ReadUTF8(4)); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF8(5)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF8(-1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal("\u0100\u0200", reader.ReadUTF16(4)); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF16(5)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF16(-1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF16(6)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal(buffer, reader.ReadBytes(4)); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Same(string.Empty, reader.ReadUtf8NullTerminated()); Assert.Equal(1, reader.Offset); reader.Offset = 1; Assert.Equal("\u0001", reader.ReadUtf8NullTerminated()); Assert.Equal(3, reader.Offset); reader.Offset = 3; Assert.Equal("\u0002", reader.ReadUtf8NullTerminated()); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Same(string.Empty, reader.ReadUtf8NullTerminated()); Assert.Equal(1, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadBytes(5)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadBytes(int.MinValue)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.GetMemoryBlockAt(-1, 1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.GetMemoryBlockAt(1, -1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal(3, reader.GetMemoryBlockAt(1, 3).Length); Assert.Equal(0, reader.Offset); reader.Offset = 3; 
reader.ReadByte(); Assert.Equal(4, reader.Offset); reader.Offset = 4; Assert.Equal(0, reader.ReadBytes(0).Length); reader.Offset = 4; int value; Assert.False(reader.TryReadCompressedInteger(out value)); Assert.Equal(BlobReader.InvalidCompressedInteger, value); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadCompressedInteger()); reader.Offset = 4; Assert.Equal(SerializationTypeCode.Invalid, reader.ReadSerializationTypeCode()); reader.Offset = 4; Assert.Equal(SignatureTypeCode.Invalid, reader.ReadSignatureTypeCode()); reader.Offset = 4; Assert.Equal(default(EntityHandle), reader.ReadTypeHandle()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadBoolean()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadByte()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadSByte()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt32()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadInt32()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt64()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadInt64()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadSingle()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadDouble()); reader.Offset = 4; } byte[] buffer2 = new byte[8] { 1, 2, 3, 4, 5, 6, 7, 8 }; fixed (byte* bufferPtr2 = buffer2) { var reader = new BlobReader(new MemoryBlock(bufferPtr2, buffer2.Length)); Assert.Equal(0, reader.Offset); Assert.Equal(0x0807060504030201UL, reader.ReadUInt64()); Assert.Equal(8, reader.Offset); reader.Reset(); Assert.Equal(0, reader.Offset); Assert.Equal(0x0807060504030201L, reader.ReadInt64()); reader.Reset(); Assert.Equal(0, reader.Offset); Assert.Equal(BitConverter.Int64BitsToDouble(0x0807060504030201L), reader.ReadDouble()); } } [Fact] public unsafe void ValidatePeekReferenceSize() { byte[] buffer = new byte[8] { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01 }; fixed (byte* bufferPtr = buffer) { var block = new MemoryBlock(bufferPtr, buffer.Length); // small ref size always fits in 16 bits Assert.Equal(0xFFFF, block.PeekReference(0, smallRefSize: true)); Assert.Equal(0xFFFF, block.PeekReference(4, smallRefSize: true)); Assert.Equal(0xFFFFU, block.PeekTaggedReference(0, smallRefSize: true)); Assert.Equal(0xFFFFU, block.PeekTaggedReference(4, smallRefSize: true)); Assert.Equal(0x01FFU, block.PeekTaggedReference(6, smallRefSize: true)); // large ref size throws on > RIDMask when tagged variant is not used. Assert.Throws<BadImageFormatException>(() => block.PeekReference(0, smallRefSize: false)); Assert.Throws<BadImageFormatException>(() => block.PeekReference(4, smallRefSize: false)); // large ref size does not throw when Tagged variant is used. 
Assert.Equal(0xFFFFFFFFU, block.PeekTaggedReference(0, smallRefSize: false)); Assert.Equal(0x01FFFFFFU, block.PeekTaggedReference(4, smallRefSize: false)); // bounds check applies in all cases Assert.Throws<BadImageFormatException>(() => block.PeekReference(7, smallRefSize: true)); Assert.Throws<BadImageFormatException>(() => block.PeekReference(5, smallRefSize: false)); } } [Fact] public unsafe void ReadFromMemoryBlock() { byte[] buffer = new byte[4] { 0, 1, 0, 2 }; fixed (byte* bufferPtr = buffer) { var block = new MemoryBlock(bufferPtr, buffer.Length); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(-1)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(int.MinValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(4)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(1)); Assert.Equal(0x02000100U, block.PeekUInt32(0)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(-1)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(int.MinValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(4)); Assert.Equal(0x0200, block.PeekUInt16(2)); int bytesRead; MetadataStringDecoder stringDecoder = MetadataStringDecoder.DefaultUTF8; Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(int.MaxValue, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(-1, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(int.MinValue, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(5, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-1, 1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(1, -1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(0, -1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-1, 0)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-int.MaxValue, int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(int.MaxValue, -int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(int.MaxValue, int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(block.Length, -1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-1, block.Length)); Assert.Equal("\u0001", block.PeekUtf8NullTerminated(1, null, stringDecoder, out bytesRead)); Assert.Equal(2, bytesRead); Assert.Equal("\u0002", block.PeekUtf8NullTerminated(3, null, stringDecoder, out bytesRead)); Assert.Equal(1, bytesRead); Assert.Equal("", block.PeekUtf8NullTerminated(4, null, stringDecoder, out bytesRead)); Assert.Equal(0, bytesRead); byte[] helloPrefix = Encoding.UTF8.GetBytes("Hello"); Assert.Equal("Hello\u0001", block.PeekUtf8NullTerminated(1, helloPrefix, stringDecoder, out bytesRead)); Assert.Equal(2, bytesRead); Assert.Equal("Hello\u0002", block.PeekUtf8NullTerminated(3, helloPrefix, stringDecoder, out bytesRead)); Assert.Equal(1, bytesRead); Assert.Equal("Hello", block.PeekUtf8NullTerminated(4, helloPrefix, stringDecoder, out bytesRead)); Assert.Equal(0, bytesRead); } } [Fact] public unsafe void IndexOf() { byte[] buffer = new byte[] { 0xF0, 0x90, 0x8D, }; fixed 
(byte* bufferPtr = buffer) { var reader = new BlobReader(bufferPtr, buffer.Length); Assert.Equal(0, reader.IndexOf(0xF0)); Assert.Equal(1, reader.IndexOf(0x90)); Assert.Equal(2, reader.IndexOf(0x8D)); Assert.Equal(-1, reader.IndexOf(0x8C)); Assert.Equal(-1, reader.IndexOf(0)); Assert.Equal(-1, reader.IndexOf(0xff)); reader.ReadByte(); Assert.Equal(-1, reader.IndexOf(0xF0)); Assert.Equal(0, reader.IndexOf(0x90)); Assert.Equal(1, reader.IndexOf(0x8D)); reader.ReadByte(); Assert.Equal(-1, reader.IndexOf(0xF0)); Assert.Equal(-1, reader.IndexOf(0x90)); Assert.Equal(0, reader.IndexOf(0x8D)); reader.ReadByte(); Assert.Equal(-1, reader.IndexOf(0xF0)); Assert.Equal(-1, reader.IndexOf(0x90)); Assert.Equal(-1, reader.IndexOf(0x8D)); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection.Internal; using System.Text; using Xunit; namespace System.Reflection.Metadata.Tests { public class BlobReaderTests { [Fact] public unsafe void Properties() { byte[] buffer = new byte[] { 0, 1, 0, 2, 5, 6 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(bufferPtr, 4); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr); Assert.Equal(0, reader.Offset); Assert.Equal(4, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Equal(0, reader.ReadByte()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 1); Assert.Equal(1, reader.Offset); Assert.Equal(3, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Equal(1, reader.ReadInt16()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 3); Assert.Equal(3, reader.Offset); Assert.Equal(1, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Throws<BadImageFormatException>(() => reader.ReadInt16()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 3); Assert.Equal(3, reader.Offset); Assert.Equal(1, reader.RemainingBytes); Assert.Equal(4, reader.Length); Assert.Equal(2, reader.ReadByte()); Assert.True(reader.StartPointer == bufferPtr); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.Equal(4, reader.Length); } } [Fact] public unsafe void Offset() { byte[] buffer = new byte[] { 0, 1, 0, 2, 5, 6 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(bufferPtr, 4); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal(0, reader.Offset); Assert.Equal(4, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr); reader.Offset = 3; Assert.Equal(3, reader.Offset); Assert.Equal(1, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 3); reader.Offset = 1; Assert.Equal(1, reader.Offset); Assert.Equal(3, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 1); Assert.Equal(1, reader.ReadByte()); Assert.Equal(2, reader.Offset); Assert.Equal(2, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 2); reader.Offset = 4; Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Throws<BadImageFormatException>(() => reader.Offset = 5); Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Throws<BadImageFormatException>(() => reader.Offset = -1); Assert.Equal(4, reader.Offset); Assert.Equal(0, reader.RemainingBytes); Assert.True(reader.CurrentPointer == bufferPtr + 4); Assert.Throws<BadImageFormatException>(() => reader.Offset = int.MaxValue); Assert.Throws<BadImageFormatException>(() => reader.Offset = int.MinValue); } } [Fact] public unsafe void PublicBlobReaderCtorValidatesArgs() { byte* bufferPtrForLambda; byte[] buffer = new byte[4] { 0, 1, 0, 2 }; fixed (byte* bufferPtr = buffer) { bufferPtrForLambda = bufferPtr; Assert.Throws<ArgumentOutOfRangeException>(() => new BlobReader(bufferPtrForLambda, -1)); } Assert.Throws<ArgumentNullException>(() => new BlobReader(null, 1)); Assert.Equal(0, new BlobReader(null, 0).Length); // this is valid 
Assert.Throws<BadImageFormatException>(() => new BlobReader(null, 0).ReadByte()); // but can't read anything non-empty from it... Assert.Same(string.Empty, new BlobReader(null, 0).ReadUtf8NullTerminated()); // can read empty string. } [Fact] public unsafe void ReadBoolean1() { byte[] buffer = new byte[] { 1, 0xff, 0, 2 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(new MemoryBlock(bufferPtr, buffer.Length)); Assert.True(reader.ReadBoolean()); Assert.True(reader.ReadBoolean()); Assert.False(reader.ReadBoolean()); Assert.True(reader.ReadBoolean()); } } [Fact] public unsafe void ReadFromMemoryReader() { byte[] buffer = new byte[4] { 0, 1, 0, 2 }; fixed (byte* bufferPtr = buffer) { var reader = new BlobReader(new MemoryBlock(bufferPtr, buffer.Length)); Assert.Equal(0, reader.Offset); Assert.Throws<BadImageFormatException>(() => reader.ReadUInt64()); Assert.Equal(0, reader.Offset); reader.Offset = 1; Assert.Throws<BadImageFormatException>(() => reader.ReadDouble()); Assert.Equal(1, reader.Offset); reader.Offset = 2; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt32()); Assert.Equal((ushort)0x0200, reader.ReadUInt16()); Assert.Equal(4, reader.Offset); reader.Offset = 2; Assert.Throws<BadImageFormatException>(() => reader.ReadSingle()); Assert.Equal(2, reader.Offset); reader.Offset = 0; Assert.Equal(9.404242E-38F, reader.ReadSingle()); Assert.Equal(4, reader.Offset); reader.Offset = 3; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt16()); Assert.Equal((byte)0x02, reader.ReadByte()); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Equal("\u0000\u0001\u0000\u0002", reader.ReadUTF8(4)); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF8(5)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF8(-1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal("\u0100\u0200", reader.ReadUTF16(4)); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF16(5)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF16(-1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadUTF16(6)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal(buffer, reader.ReadBytes(4)); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Same(string.Empty, reader.ReadUtf8NullTerminated()); Assert.Equal(1, reader.Offset); reader.Offset = 1; Assert.Equal("\u0001", reader.ReadUtf8NullTerminated()); Assert.Equal(3, reader.Offset); reader.Offset = 3; Assert.Equal("\u0002", reader.ReadUtf8NullTerminated()); Assert.Equal(4, reader.Offset); reader.Offset = 0; Assert.Same(string.Empty, reader.ReadUtf8NullTerminated()); Assert.Equal(1, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadBytes(5)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.ReadBytes(int.MinValue)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.GetMemoryBlockAt(-1, 1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Throws<BadImageFormatException>(() => reader.GetMemoryBlockAt(1, -1)); Assert.Equal(0, reader.Offset); reader.Offset = 0; Assert.Equal(3, reader.GetMemoryBlockAt(1, 3).Length); Assert.Equal(0, reader.Offset); reader.Offset = 3; 
reader.ReadByte(); Assert.Equal(4, reader.Offset); reader.Offset = 4; Assert.Equal(0, reader.ReadBytes(0).Length); reader.Offset = 4; int value; Assert.False(reader.TryReadCompressedInteger(out value)); Assert.Equal(BlobReader.InvalidCompressedInteger, value); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadCompressedInteger()); reader.Offset = 4; Assert.Equal(SerializationTypeCode.Invalid, reader.ReadSerializationTypeCode()); reader.Offset = 4; Assert.Equal(SignatureTypeCode.Invalid, reader.ReadSignatureTypeCode()); reader.Offset = 4; Assert.Equal(default(EntityHandle), reader.ReadTypeHandle()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadBoolean()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadByte()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadSByte()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt32()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadInt32()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadUInt64()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadInt64()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadSingle()); reader.Offset = 4; Assert.Throws<BadImageFormatException>(() => reader.ReadDouble()); reader.Offset = 4; } byte[] buffer2 = new byte[8] { 1, 2, 3, 4, 5, 6, 7, 8 }; fixed (byte* bufferPtr2 = buffer2) { var reader = new BlobReader(new MemoryBlock(bufferPtr2, buffer2.Length)); Assert.Equal(0, reader.Offset); Assert.Equal(0x0807060504030201UL, reader.ReadUInt64()); Assert.Equal(8, reader.Offset); reader.Reset(); Assert.Equal(0, reader.Offset); Assert.Equal(0x0807060504030201L, reader.ReadInt64()); reader.Reset(); Assert.Equal(0, reader.Offset); Assert.Equal(BitConverter.Int64BitsToDouble(0x0807060504030201L), reader.ReadDouble()); } } [Fact] public unsafe void ValidatePeekReferenceSize() { byte[] buffer = new byte[8] { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01 }; fixed (byte* bufferPtr = buffer) { var block = new MemoryBlock(bufferPtr, buffer.Length); // small ref size always fits in 16 bits Assert.Equal(0xFFFF, block.PeekReference(0, smallRefSize: true)); Assert.Equal(0xFFFF, block.PeekReference(4, smallRefSize: true)); Assert.Equal(0xFFFFU, block.PeekTaggedReference(0, smallRefSize: true)); Assert.Equal(0xFFFFU, block.PeekTaggedReference(4, smallRefSize: true)); Assert.Equal(0x01FFU, block.PeekTaggedReference(6, smallRefSize: true)); // large ref size throws on > RIDMask when tagged variant is not used. Assert.Throws<BadImageFormatException>(() => block.PeekReference(0, smallRefSize: false)); Assert.Throws<BadImageFormatException>(() => block.PeekReference(4, smallRefSize: false)); // large ref size does not throw when Tagged variant is used. 
Assert.Equal(0xFFFFFFFFU, block.PeekTaggedReference(0, smallRefSize: false)); Assert.Equal(0x01FFFFFFU, block.PeekTaggedReference(4, smallRefSize: false)); // bounds check applies in all cases Assert.Throws<BadImageFormatException>(() => block.PeekReference(7, smallRefSize: true)); Assert.Throws<BadImageFormatException>(() => block.PeekReference(5, smallRefSize: false)); } } [Fact] public unsafe void ReadFromMemoryBlock() { byte[] buffer = new byte[4] { 0, 1, 0, 2 }; fixed (byte* bufferPtr = buffer) { var block = new MemoryBlock(bufferPtr, buffer.Length); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(-1)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(int.MinValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(4)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt32(1)); Assert.Equal(0x02000100U, block.PeekUInt32(0)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(-1)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(int.MinValue)); Assert.Throws<BadImageFormatException>(() => block.PeekUInt16(4)); Assert.Equal(0x0200, block.PeekUInt16(2)); int bytesRead; MetadataStringDecoder stringDecoder = MetadataStringDecoder.DefaultUTF8; Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(int.MaxValue, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(-1, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(int.MinValue, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.PeekUtf8NullTerminated(5, null, stringDecoder, out bytesRead)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-1, 1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(1, -1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(0, -1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-1, 0)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-int.MaxValue, int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(int.MaxValue, -int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(int.MaxValue, int.MaxValue)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(block.Length, -1)); Assert.Throws<BadImageFormatException>(() => block.GetMemoryBlockAt(-1, block.Length)); Assert.Equal("\u0001", block.PeekUtf8NullTerminated(1, null, stringDecoder, out bytesRead)); Assert.Equal(2, bytesRead); Assert.Equal("\u0002", block.PeekUtf8NullTerminated(3, null, stringDecoder, out bytesRead)); Assert.Equal(1, bytesRead); Assert.Equal("", block.PeekUtf8NullTerminated(4, null, stringDecoder, out bytesRead)); Assert.Equal(0, bytesRead); byte[] helloPrefix = Encoding.UTF8.GetBytes("Hello"); Assert.Equal("Hello\u0001", block.PeekUtf8NullTerminated(1, helloPrefix, stringDecoder, out bytesRead)); Assert.Equal(2, bytesRead); Assert.Equal("Hello\u0002", block.PeekUtf8NullTerminated(3, helloPrefix, stringDecoder, out bytesRead)); Assert.Equal(1, bytesRead); Assert.Equal("Hello", block.PeekUtf8NullTerminated(4, helloPrefix, stringDecoder, out bytesRead)); Assert.Equal(0, bytesRead); } } [Fact] public unsafe void IndexOf() { byte[] buffer = new byte[] { 0xF0, 0x90, 0x8D, }; fixed 
(byte* bufferPtr = buffer) { var reader = new BlobReader(bufferPtr, buffer.Length); Assert.Equal(0, reader.IndexOf(0xF0)); Assert.Equal(1, reader.IndexOf(0x90)); Assert.Equal(2, reader.IndexOf(0x8D)); Assert.Equal(-1, reader.IndexOf(0x8C)); Assert.Equal(-1, reader.IndexOf(0)); Assert.Equal(-1, reader.IndexOf(0xff)); reader.ReadByte(); Assert.Equal(-1, reader.IndexOf(0xF0)); Assert.Equal(0, reader.IndexOf(0x90)); Assert.Equal(1, reader.IndexOf(0x8D)); reader.ReadByte(); Assert.Equal(-1, reader.IndexOf(0xF0)); Assert.Equal(-1, reader.IndexOf(0x90)); Assert.Equal(0, reader.IndexOf(0x8D)); reader.ReadByte(); Assert.Equal(-1, reader.IndexOf(0xF0)); Assert.Equal(-1, reader.IndexOf(0x90)); Assert.Equal(-1, reader.IndexOf(0x8D)); } } } }
-1
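For context on the API exercised by the tests above, here is a minimal, self-contained sketch of typical BlobReader consumption. It assumes a project with AllowUnsafeBlocks enabled and a reference to the System.Reflection.Metadata package; the byte values are illustrative only, not taken from any real metadata blob.

using System;
using System.Reflection.Metadata;

public static class BlobReaderSketch
{
    public static unsafe void Main()
    {
        // 0x03 encodes the compressed integer 3; "Hi" follows, null-terminated.
        byte[] blob = { 0x03, (byte)'H', (byte)'i', 0x00 };

        fixed (byte* p = blob)
        {
            var reader = new BlobReader(p, blob.Length);
            int length = reader.ReadCompressedInteger();   // 3
            string text = reader.ReadUtf8NullTerminated(); // "Hi"
            Console.WriteLine($"{length}: {text}");
        }
    }
}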
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases where we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases where we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
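To make the alignment and split-parameter cases mentioned above concrete, the following is a hypothetical, heavily simplified model of AAPCS-style argument placement on ARM32. The names, rules, and layout here are illustrative assumptions only; this is not RyuJIT's actual algorithm or code.

using System;
using System.Collections.Generic;

// Hypothetical, simplified model of AAPCS-style argument placement on ARM32.
// Illustrative only -- this is not RyuJIT's algorithm.
public static class Arm32ArgLayoutSketch
{
    public sealed record Arg(string Name, int Size, int Alignment);

    public static void Main()
    {
        var args = new List<Arg>
        {
            new("l", 8, 8),   // 8-byte arg     -> r0:r1
            new("s", 12, 4),  // 12-byte struct -> split: r2:r3 plus 4 bytes of stack
            new("d", 8, 8),   // 8-byte arg     -> stack, rounded up to 8-byte alignment
            new("b", 4, 4),   // int            -> stack
        };

        int nextReg = 0;     // next free argument register among r0..r3
        int stackOffset = 0; // next free outgoing stack offset, in bytes

        foreach (var arg in args)
        {
            // An 8-byte-aligned argument must start in an even-numbered register.
            if (arg.Alignment == 8 && nextReg % 2 != 0)
                nextReg++;

            int regBytes = Math.Max(0, Math.Min(arg.Size, (4 - nextReg) * 4));
            int stackBytes = arg.Size - regBytes;

            if (stackBytes > 0)
            {
                // The stack portion must respect the argument's alignment as well,
                // which can introduce padding holes between stack arguments.
                stackOffset = (stackOffset + arg.Alignment - 1) & ~(arg.Alignment - 1);
                Console.WriteLine($"{arg.Name}: {regBytes} bytes in registers, {stackBytes} bytes at [sp+{stackOffset}]");
                stackOffset += stackBytes;
            }
            else
            {
                Console.WriteLine($"{arg.Name}: r{nextReg}..r{nextReg + arg.Size / 4 - 1}");
            }

            nextReg += regBytes / 4;
        }
    }
}

Note the padding hole at [sp+4] created by d's 8-byte alignment, and s being split across r2:r3 and the stack; cases like these are where an early offset computation can silently diverge from the final layout.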
./src/tests/JIT/Methodical/explicit/coverage/expl_gc_int_1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Xunit; [StructLayout(LayoutKind.Explicit)] internal class AA { [FieldOffset(7)] public short tmp1; [FieldOffset(8)] public int q; [FieldOffset(37)] public int tmp2; [FieldOffset(44)] public int tmp3; public AA(int qq) { tmp1 = 0; tmp2 = 0; tmp3 = 0; q = qq; } public static AA[] a_init = new AA[101]; public static AA[] a_zero = new AA[101]; public static AA[,,] aa_init = new AA[1, 101, 2]; public static AA[,,] aa_zero = new AA[1, 101, 2]; public static object b_init = new AA(100); public static AA _init, _zero; public static int call_target(int arg) { return arg; } public static int call_target_ref(ref int arg) { return arg; } public void verify() { } public static void verify_all() { a_init[100].verify(); a_zero[100].verify(); aa_init[0, 99, 1].verify(); aa_zero[0, 99, 1].verify(); _init.verify(); _zero.verify(); BB.f_init.verify(); BB.f_zero.verify(); } public static void reset() { a_init[100] = new AA(100); a_zero[100] = new AA(0); aa_init[0, 99, 1] = new AA(100); aa_zero[0, 99, 1] = new AA(0); _init = new AA(100); _zero = new AA(0); BB.f_init = new AA(100); BB.f_zero = new AA(0); } } internal struct BB { public static AA f_init, f_zero; } public static class Test_expl_gc_int_1 { [Fact] public static int TestEntrypoint() { return TestApp.RunAllTests(); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Xunit; [StructLayout(LayoutKind.Explicit)] internal class AA { [FieldOffset(7)] public short tmp1; [FieldOffset(8)] public int q; [FieldOffset(37)] public int tmp2; [FieldOffset(44)] public int tmp3; public AA(int qq) { tmp1 = 0; tmp2 = 0; tmp3 = 0; q = qq; } public static AA[] a_init = new AA[101]; public static AA[] a_zero = new AA[101]; public static AA[,,] aa_init = new AA[1, 101, 2]; public static AA[,,] aa_zero = new AA[1, 101, 2]; public static object b_init = new AA(100); public static AA _init, _zero; public static int call_target(int arg) { return arg; } public static int call_target_ref(ref int arg) { return arg; } public void verify() { } public static void verify_all() { a_init[100].verify(); a_zero[100].verify(); aa_init[0, 99, 1].verify(); aa_zero[0, 99, 1].verify(); _init.verify(); _zero.verify(); BB.f_init.verify(); BB.f_zero.verify(); } public static void reset() { a_init[100] = new AA(100); a_zero[100] = new AA(0); aa_init[0, 99, 1] = new AA(100); aa_zero[0, 99, 1] = new AA(0); _init = new AA(100); _zero = new AA(0); BB.f_init = new AA(100); BB.f_zero = new AA(0); } } internal struct BB { public static AA f_init, f_zero; } public static class Test_expl_gc_int_1 { [Fact] public static int TestEntrypoint() { return TestApp.RunAllTests(); } }
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases where we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases where we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/coreclr/vm/baseassemblyspec.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================ // // BaseAssemblySpec.cpp // // Implements the BaseAssemblySpec class // // ============================================================ #include "common.h" #include "thekey.h" #include "strongnameinternal.h" #include "strongnameholders.h" VOID BaseAssemblySpec::CloneFieldsToStackingAllocator( StackingAllocator* alloc) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(ThrowOutOfMemory();); } CONTRACTL_END #if _DEBUG DWORD hash = Hash(); #endif if ((~m_ownedFlags & NAME_OWNED) && m_pAssemblyName) { S_UINT32 len = S_UINT32((DWORD) strlen(m_pAssemblyName)) + S_UINT32(1); if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); LPSTR temp = (LPSTR)alloc->Alloc(len); strcpy_s(temp, len.Value(), m_pAssemblyName); m_pAssemblyName = temp; } if ((~m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) && m_pbPublicKeyOrToken && m_cbPublicKeyOrToken > 0) { BYTE *temp = (BYTE *)alloc->Alloc(S_UINT32(m_cbPublicKeyOrToken)) ; memcpy(temp, m_pbPublicKeyOrToken, m_cbPublicKeyOrToken); m_pbPublicKeyOrToken = temp; } if ((~m_ownedFlags & LOCALE_OWNED) && m_context.szLocale) { S_UINT32 len = S_UINT32((DWORD) strlen(m_context.szLocale)) + S_UINT32(1); if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); LPSTR temp = (char *)alloc->Alloc(len) ; strcpy_s(temp, len.Value(), m_context.szLocale); m_context.szLocale = temp; } if ((~m_ownedFlags & CODEBASE_OWNED) && m_wszCodeBase) { S_UINT32 len = S_UINT32((DWORD) wcslen(m_wszCodeBase)) + S_UINT32(1); if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); LPWSTR temp = (LPWSTR)alloc->Alloc(len*S_UINT32(sizeof(WCHAR))); wcscpy_s(temp, len.Value(), m_wszCodeBase); m_wszCodeBase = temp; } _ASSERTE(hash == Hash()); } BOOL BaseAssemblySpec::IsCoreLib() { CONTRACTL { THROWS; INSTANCE_CHECK; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_pAssemblyName == NULL) { LPCWSTR file = GetCodeBase(); if (file) { StackSString path(file); PEAssembly::UrlToPath(path); return SystemDomain::System()->IsBaseLibrary(path); } return FALSE; } _ASSERTE(strlen(g_psBaseLibraryName) == CoreLibNameLen); // <TODO>More of bug 213471</TODO> size_t iNameLen = strlen(m_pAssemblyName); return ( (iNameLen >= CoreLibNameLen) && ( (!stricmpUTF8(m_pAssemblyName, g_psBaseLibrary)) || ( (!SString::_strnicmp(m_pAssemblyName, g_psBaseLibraryName, CoreLibNameLen)) && ( (iNameLen == CoreLibNameLen) || (m_pAssemblyName[CoreLibNameLen] == ',') ) ) ) ); } #define CORELIB_PUBLICKEY g_rbTheSilverlightPlatformKey // A satellite assembly for CoreLib is named "System.Private.CoreLib.resources" or // System.Private.CoreLib.debug.resources.dll and uses the same public key as CoreLib. // It does not necessarily have the same version, and the Culture will // always be set to something like "jp-JP". 
BOOL BaseAssemblySpec::IsCoreLibSatellite() const { CONTRACTL { THROWS; INSTANCE_CHECK; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_pAssemblyName == NULL) { LPCWSTR file = GetCodeBase(); if (file) { StackSString path(file); PEAssembly::UrlToPath(path); return SystemDomain::System()->IsBaseLibrarySatellite(path); } return FALSE; } _ASSERTE(strlen(g_psBaseLibrarySatelliteAssemblyName) == CoreLibSatelliteNameLen); // <TODO>More of bug 213471</TODO> size_t iNameLen = strlen(m_pAssemblyName); // we allow name to be of the form System.Private.CoreLib.resources.dll only BOOL r = ( (m_cbPublicKeyOrToken == sizeof(CORELIB_PUBLICKEY)) && (iNameLen >= CoreLibSatelliteNameLen) && (!SString::_strnicmp(m_pAssemblyName, g_psBaseLibrarySatelliteAssemblyName, CoreLibSatelliteNameLen)) && ( (iNameLen == CoreLibSatelliteNameLen) || (m_pAssemblyName[CoreLibSatelliteNameLen] == ',') ) ); r = r && ( memcmp(m_pbPublicKeyOrToken,CORELIB_PUBLICKEY,sizeof(CORELIB_PUBLICKEY)) == 0); return r; } VOID BaseAssemblySpec::ConvertPublicKeyToToken() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(HasPublicKey()); } CONTRACTL_END; StrongNameBufferHolder<BYTE> pbPublicKeyToken; DWORD cbPublicKeyToken; IfFailThrow(StrongNameTokenFromPublicKey(m_pbPublicKeyOrToken, m_cbPublicKeyOrToken, &pbPublicKeyToken, &cbPublicKeyToken)); BYTE *temp = new BYTE [cbPublicKeyToken]; memcpy(temp, pbPublicKeyToken, cbPublicKeyToken); if (m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) delete [] m_pbPublicKeyOrToken; else m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED; m_pbPublicKeyOrToken = temp; m_cbPublicKeyOrToken = cbPublicKeyToken; m_dwFlags &= ~afPublicKey; } // Similar to BaseAssemblySpec::CompareEx, but allows the ref to be partially specified // Returns TRUE if ref matches def, FALSE otherwise. // // static BOOL BaseAssemblySpec::CompareRefToDef(const BaseAssemblySpec *pRef, const BaseAssemblySpec *pDef) { WRAPPER_NO_CONTRACT; if(pRef->m_wszCodeBase || pDef->m_wszCodeBase) { if(!pRef->m_wszCodeBase || !pDef->m_wszCodeBase) return FALSE; return wcscmp(pRef->m_wszCodeBase,(pDef->m_wszCodeBase)) == 0; } // Compare fields // // name is non-optional // if (pRef->m_pAssemblyName != pDef->m_pAssemblyName && (pRef->m_pAssemblyName == NULL || pDef->m_pAssemblyName == NULL || CompareStrings(pRef->m_pAssemblyName, pDef->m_pAssemblyName))) { return FALSE; } // // public key [token] is non-optional // if (pRef->m_cbPublicKeyOrToken != pDef->m_cbPublicKeyOrToken || memcmp(pRef->m_pbPublicKeyOrToken, pDef->m_pbPublicKeyOrToken, pRef->m_cbPublicKeyOrToken)) { return FALSE; } // // flags are non-optional, except processor architecture, content type, and debuggable attribute bits // DWORD dwFlagsMask = ~(afPA_FullMask | afContentType_Mask | afDebuggableAttributeMask); if ((pRef->m_dwFlags & dwFlagsMask) != (pDef->m_dwFlags & dwFlagsMask)) return FALSE; // To match Fusion behavior, we ignore processor architecture (GetAssemblyNameRefFromMDImport // does not look at architecture part of the flags, and having processor architecture in // InternalsVisibleTo attribute causess META_E_CA_BAD_FRIENDS_ARGS exception). // Content type is optional in pRef. 
if (!IsAfContentType_Default(pRef->m_dwFlags) && (pRef->m_dwFlags & afContentType_Mask) != (pDef->m_dwFlags & afContentType_Mask)) return FALSE; // // version info is optional in the ref // if (pRef->m_context.usMajorVersion != (USHORT) -1) { if (pRef->m_context.usMajorVersion != pDef->m_context.usMajorVersion) return FALSE; if (pRef->m_context.usMinorVersion != (USHORT) -1) { if (pRef->m_context.usMinorVersion != pDef->m_context.usMinorVersion) return FALSE; if (pRef->m_context.usBuildNumber != (USHORT) -1) { if (pRef->m_context.usBuildNumber != pDef->m_context.usBuildNumber) return FALSE; if (pRef->m_context.usRevisionNumber != (USHORT) -1) { if (pRef->m_context.usRevisionNumber != pDef->m_context.usRevisionNumber) return FALSE; } } } } // // locale info is optional in the ref // if ((pRef->m_context.szLocale != NULL) && (pRef->m_context.szLocale != pDef->m_context.szLocale) && strcmp(pRef->m_context.szLocale, pDef->m_context.szLocale)) { return FALSE; } return TRUE; } // static BOOL BaseAssemblySpec::RefMatchesDef(const BaseAssemblySpec* pRef, const BaseAssemblySpec* pDef) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(pRef->GetName()!=NULL && pDef->GetName()!=NULL); } CONTRACTL_END; if (pRef->IsStrongNamed()) { if (!pDef->IsStrongNamed()) return FALSE; if(pRef->HasPublicKey()) { // cannot use pRef->CompareEx(pDef) here because it does a full comparison // and the ref may be partial. return CompareRefToDef(pRef, pDef); } else { BaseAssemblySpec defCopy; defCopy.CopyFrom(pDef); defCopy.ConvertPublicKeyToToken(); return CompareRefToDef(pRef, &defCopy); } } else { return (CompareStrings(pRef->GetName(), pDef->GetName())==0); } } VOID BaseAssemblySpec::SetName(SString const & ssName) { CONTRACTL { INSTANCE_CHECK; GC_NOTRIGGER; THROWS; } CONTRACTL_END; if (m_ownedFlags & NAME_OWNED) { delete [] m_pAssemblyName; m_ownedFlags &= ~NAME_OWNED; } m_pAssemblyName = NULL; IfFailThrow(FString::ConvertUnicode_Utf8(ssName.GetUnicode(), & ((LPSTR &) m_pAssemblyName))); m_ownedFlags |= NAME_OWNED; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================ // // BaseAssemblySpec.cpp // // Implements the BaseAssemblySpec class // // ============================================================ #include "common.h" #include "thekey.h" #include "strongnameinternal.h" #include "strongnameholders.h" VOID BaseAssemblySpec::CloneFieldsToStackingAllocator( StackingAllocator* alloc) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(ThrowOutOfMemory();); } CONTRACTL_END #if _DEBUG DWORD hash = Hash(); #endif if ((~m_ownedFlags & NAME_OWNED) && m_pAssemblyName) { S_UINT32 len = S_UINT32((DWORD) strlen(m_pAssemblyName)) + S_UINT32(1); if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); LPSTR temp = (LPSTR)alloc->Alloc(len); strcpy_s(temp, len.Value(), m_pAssemblyName); m_pAssemblyName = temp; } if ((~m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) && m_pbPublicKeyOrToken && m_cbPublicKeyOrToken > 0) { BYTE *temp = (BYTE *)alloc->Alloc(S_UINT32(m_cbPublicKeyOrToken)) ; memcpy(temp, m_pbPublicKeyOrToken, m_cbPublicKeyOrToken); m_pbPublicKeyOrToken = temp; } if ((~m_ownedFlags & LOCALE_OWNED) && m_context.szLocale) { S_UINT32 len = S_UINT32((DWORD) strlen(m_context.szLocale)) + S_UINT32(1); if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); LPSTR temp = (char *)alloc->Alloc(len) ; strcpy_s(temp, len.Value(), m_context.szLocale); m_context.szLocale = temp; } if ((~m_ownedFlags & CODEBASE_OWNED) && m_wszCodeBase) { S_UINT32 len = S_UINT32((DWORD) wcslen(m_wszCodeBase)) + S_UINT32(1); if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW); LPWSTR temp = (LPWSTR)alloc->Alloc(len*S_UINT32(sizeof(WCHAR))); wcscpy_s(temp, len.Value(), m_wszCodeBase); m_wszCodeBase = temp; } _ASSERTE(hash == Hash()); } BOOL BaseAssemblySpec::IsCoreLib() { CONTRACTL { THROWS; INSTANCE_CHECK; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_pAssemblyName == NULL) { LPCWSTR file = GetCodeBase(); if (file) { StackSString path(file); PEAssembly::UrlToPath(path); return SystemDomain::System()->IsBaseLibrary(path); } return FALSE; } _ASSERTE(strlen(g_psBaseLibraryName) == CoreLibNameLen); // <TODO>More of bug 213471</TODO> size_t iNameLen = strlen(m_pAssemblyName); return ( (iNameLen >= CoreLibNameLen) && ( (!stricmpUTF8(m_pAssemblyName, g_psBaseLibrary)) || ( (!SString::_strnicmp(m_pAssemblyName, g_psBaseLibraryName, CoreLibNameLen)) && ( (iNameLen == CoreLibNameLen) || (m_pAssemblyName[CoreLibNameLen] == ',') ) ) ) ); } #define CORELIB_PUBLICKEY g_rbTheSilverlightPlatformKey // A satellite assembly for CoreLib is named "System.Private.CoreLib.resources" or // System.Private.CoreLib.debug.resources.dll and uses the same public key as CoreLib. // It does not necessarily have the same version, and the Culture will // always be set to something like "jp-JP". 
BOOL BaseAssemblySpec::IsCoreLibSatellite() const { CONTRACTL { THROWS; INSTANCE_CHECK; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_pAssemblyName == NULL) { LPCWSTR file = GetCodeBase(); if (file) { StackSString path(file); PEAssembly::UrlToPath(path); return SystemDomain::System()->IsBaseLibrarySatellite(path); } return FALSE; } _ASSERTE(strlen(g_psBaseLibrarySatelliteAssemblyName) == CoreLibSatelliteNameLen); // <TODO>More of bug 213471</TODO> size_t iNameLen = strlen(m_pAssemblyName); // we allow name to be of the form System.Private.CoreLib.resources.dll only BOOL r = ( (m_cbPublicKeyOrToken == sizeof(CORELIB_PUBLICKEY)) && (iNameLen >= CoreLibSatelliteNameLen) && (!SString::_strnicmp(m_pAssemblyName, g_psBaseLibrarySatelliteAssemblyName, CoreLibSatelliteNameLen)) && ( (iNameLen == CoreLibSatelliteNameLen) || (m_pAssemblyName[CoreLibSatelliteNameLen] == ',') ) ); r = r && ( memcmp(m_pbPublicKeyOrToken,CORELIB_PUBLICKEY,sizeof(CORELIB_PUBLICKEY)) == 0); return r; } VOID BaseAssemblySpec::ConvertPublicKeyToToken() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(HasPublicKey()); } CONTRACTL_END; StrongNameBufferHolder<BYTE> pbPublicKeyToken; DWORD cbPublicKeyToken; IfFailThrow(StrongNameTokenFromPublicKey(m_pbPublicKeyOrToken, m_cbPublicKeyOrToken, &pbPublicKeyToken, &cbPublicKeyToken)); BYTE *temp = new BYTE [cbPublicKeyToken]; memcpy(temp, pbPublicKeyToken, cbPublicKeyToken); if (m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) delete [] m_pbPublicKeyOrToken; else m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED; m_pbPublicKeyOrToken = temp; m_cbPublicKeyOrToken = cbPublicKeyToken; m_dwFlags &= ~afPublicKey; } // Similar to BaseAssemblySpec::CompareEx, but allows the ref to be partially specified // Returns TRUE if ref matches def, FALSE otherwise. // // static BOOL BaseAssemblySpec::CompareRefToDef(const BaseAssemblySpec *pRef, const BaseAssemblySpec *pDef) { WRAPPER_NO_CONTRACT; if(pRef->m_wszCodeBase || pDef->m_wszCodeBase) { if(!pRef->m_wszCodeBase || !pDef->m_wszCodeBase) return FALSE; return wcscmp(pRef->m_wszCodeBase,(pDef->m_wszCodeBase)) == 0; } // Compare fields // // name is non-optional // if (pRef->m_pAssemblyName != pDef->m_pAssemblyName && (pRef->m_pAssemblyName == NULL || pDef->m_pAssemblyName == NULL || CompareStrings(pRef->m_pAssemblyName, pDef->m_pAssemblyName))) { return FALSE; } // // public key [token] is non-optional // if (pRef->m_cbPublicKeyOrToken != pDef->m_cbPublicKeyOrToken || memcmp(pRef->m_pbPublicKeyOrToken, pDef->m_pbPublicKeyOrToken, pRef->m_cbPublicKeyOrToken)) { return FALSE; } // // flags are non-optional, except processor architecture, content type, and debuggable attribute bits // DWORD dwFlagsMask = ~(afPA_FullMask | afContentType_Mask | afDebuggableAttributeMask); if ((pRef->m_dwFlags & dwFlagsMask) != (pDef->m_dwFlags & dwFlagsMask)) return FALSE; // To match Fusion behavior, we ignore processor architecture (GetAssemblyNameRefFromMDImport // does not look at architecture part of the flags, and having processor architecture in // InternalsVisibleTo attribute causess META_E_CA_BAD_FRIENDS_ARGS exception). // Content type is optional in pRef. 
if (!IsAfContentType_Default(pRef->m_dwFlags) && (pRef->m_dwFlags & afContentType_Mask) != (pDef->m_dwFlags & afContentType_Mask)) return FALSE; // // version info is optional in the ref // if (pRef->m_context.usMajorVersion != (USHORT) -1) { if (pRef->m_context.usMajorVersion != pDef->m_context.usMajorVersion) return FALSE; if (pRef->m_context.usMinorVersion != (USHORT) -1) { if (pRef->m_context.usMinorVersion != pDef->m_context.usMinorVersion) return FALSE; if (pRef->m_context.usBuildNumber != (USHORT) -1) { if (pRef->m_context.usBuildNumber != pDef->m_context.usBuildNumber) return FALSE; if (pRef->m_context.usRevisionNumber != (USHORT) -1) { if (pRef->m_context.usRevisionNumber != pDef->m_context.usRevisionNumber) return FALSE; } } } } // // locale info is optional in the ref // if ((pRef->m_context.szLocale != NULL) && (pRef->m_context.szLocale != pDef->m_context.szLocale) && strcmp(pRef->m_context.szLocale, pDef->m_context.szLocale)) { return FALSE; } return TRUE; } // static BOOL BaseAssemblySpec::RefMatchesDef(const BaseAssemblySpec* pRef, const BaseAssemblySpec* pDef) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; PRECONDITION(pRef->GetName()!=NULL && pDef->GetName()!=NULL); } CONTRACTL_END; if (pRef->IsStrongNamed()) { if (!pDef->IsStrongNamed()) return FALSE; if(pRef->HasPublicKey()) { // cannot use pRef->CompareEx(pDef) here because it does a full comparison // and the ref may be partial. return CompareRefToDef(pRef, pDef); } else { BaseAssemblySpec defCopy; defCopy.CopyFrom(pDef); defCopy.ConvertPublicKeyToToken(); return CompareRefToDef(pRef, &defCopy); } } else { return (CompareStrings(pRef->GetName(), pDef->GetName())==0); } } VOID BaseAssemblySpec::SetName(SString const & ssName) { CONTRACTL { INSTANCE_CHECK; GC_NOTRIGGER; THROWS; } CONTRACTL_END; if (m_ownedFlags & NAME_OWNED) { delete [] m_pAssemblyName; m_ownedFlags &= ~NAME_OWNED; } m_pAssemblyName = NULL; IfFailThrow(FString::ConvertUnicode_Utf8(ssName.GetUnicode(), & ((LPSTR &) m_pAssemblyName))); m_ownedFlags |= NAME_OWNED; }
-1
dotnet/runtime
66,367
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled
When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases where we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
jakobbotsch
2022-03-09T00:01:53Z
2022-03-09T12:59:09Z
c9f7f7389e8e9a00d501aef696333b67d218baac
cef825fd5425203ac28d073bf9fccc33c638f179
Fix early stack offset and size computations for ARM32 with FEATURE_FASTTAILCALL enabled. When FEATURE_FASTTAILCALL is enabled, we compute stack offsets for parameters early. These are used to check for interference when placing arguments for fast tailcalls. On ARM32 the assigned offsets were wrong in several cases involving alignment, and in cases where we have split parameters. @clamp03 this should simplify #66282 significantly. cc @dotnet/jit-contrib PTAL @BruceForstall
./src/tests/JIT/Methodical/divrem/div/u4div_cs_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="u4div.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="u4div.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,364
Inject existing object into MEF2
Closes #29400. MEF does not look actively developed today, but this feature has long been requested. Is it still open for change? The implementation is taken directly and is not optimal. One use of this is AsmDiff in arcade.
huoyaoyuan
2022-03-08T22:37:36Z
2022-03-16T22:03:42Z
e34e8dd85e7ffaaad8c884de0967cb8d845af5c6
6fdd29a31f50fda711eaa79bef58364ea714b3f4
Inject existing object into MEF2. Closes #29400. MEF does not look actively developed today, but this feature has long been requested. Is it still open for change? The implementation is taken directly and is not optimal. One use of this is AsmDiff in arcade.
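A brief sketch of how the added capability can be consumed, assuming a WithExport-style method on ContainerConfiguration along the lines discussed in #29400; treat the exact method shape as an assumption based on this PR rather than settled documentation.

using System;
using System.Composition;
using System.Composition.Hosting;

public interface ILogger
{
    void Log(string message);
}

public sealed class ConsoleLogger : ILogger
{
    public void Log(string message) => Console.WriteLine(message);
}

[Export]
public sealed class Service
{
    [Import]
    public ILogger Logger { get; set; }
}

public static class Program
{
    public static void Main()
    {
        // The instance is created by the host, not constructed by the container.
        var logger = new ConsoleLogger();

        using CompositionHost container = new ContainerConfiguration()
            .WithPart<Service>()
            .WithExport<ILogger>(logger) // assumed shape of the API this PR adds
            .CreateContainer();

        Service service = container.GetExport<Service>();
        service.Logger.Log("hello from an injected instance");
    }
}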
./src/libraries/System.Composition.TypedParts/src/System.Composition.TypedParts.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <RootNamespace>System.Composition</RootNamespace> <TargetFrameworks>$(NetCoreAppCurrent);$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum)</TargetFrameworks> <IsTrimmable>false</IsTrimmable> <IsPackable>true</IsPackable> <StrongNameKeyId>Microsoft</StrongNameKeyId> <PackageDescription>Provides some extension methods for the Managed Extensibility Framework. Commonly Used Types: System.Composition.CompositionContextExtensions System.Composition.Hosting.ContainerConfiguration</PackageDescription> </PropertyGroup> <ItemGroup> <Compile Include="System\Composition\CompositionContextExtensions.cs" /> <Compile Include="System\Composition\Convention\AttributedModelProviderExtensions.cs" /> <Compile Include="System\Composition\Debugging\ContainerConfigurationDebuggerProxy.cs" /> <Compile Include="System\Composition\Debugging\DiscoveredPartDebuggerProxy.cs" /> <Compile Include="System\Composition\Hosting\ContainerConfiguration.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\ActivationFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\DisposalFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\LifetimeFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\OnImportsSatisfiedFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\PropertyImportSite.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\PropertyInjectionFeature.cs" /> <Compile Include="System\Composition\TypedParts\ContractHelpers.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredExport.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredInstanceExport.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredPart.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredPropertyExport.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\ParameterImportSite.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\TypeInspector.cs" /> <Compile Include="System\Composition\TypedParts\ImportInfo.cs" /> <Compile Include="System\Composition\TypedParts\TypedPartExportDescriptorProvider.cs" /> <Compile Include="System\Composition\TypedParts\Util\DirectAttributeContext.cs" /> <Compile Include="$(CoreLibSharedDir)System\Numerics\Hashing\HashHelpers.cs" Link="Common\System\Numerics\Hashing\HashHelpers.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="$(LibrariesProjectRoot)System.Composition.AttributedModel\src\System.Composition.AttributedModel.csproj" /> <ProjectReference Include="$(LibrariesProjectRoot)System.Composition.Hosting\src\System.Composition.Hosting.csproj" /> <ProjectReference Include="$(LibrariesProjectRoot)System.Composition.Runtime\src\System.Composition.Runtime.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp'"> <Reference Include="System.Collections" /> <Reference Include="System.Linq" /> <Reference Include="System.Linq.Expressions" /> <Reference Include="System.Runtime" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <RootNamespace>System.Composition</RootNamespace> <TargetFrameworks>$(NetCoreAppCurrent);$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum)</TargetFrameworks> <IsTrimmable>false</IsTrimmable> <IsPackable>true</IsPackable> <StrongNameKeyId>Microsoft</StrongNameKeyId> <PackageDescription>Provides some extension methods for the Managed Extensibility Framework. Commonly Used Types: System.Composition.CompositionContextExtensions System.Composition.Hosting.ContainerConfiguration</PackageDescription> </PropertyGroup> <ItemGroup> <Compile Include="System\Composition\CompositionContextExtensions.cs" /> <Compile Include="System\Composition\Convention\AttributedModelProviderExtensions.cs" /> <Compile Include="System\Composition\Debugging\ContainerConfigurationDebuggerProxy.cs" /> <Compile Include="System\Composition\Debugging\DiscoveredPartDebuggerProxy.cs" /> <Compile Include="System\Composition\Hosting\ContainerConfiguration.cs" /> <Compile Include="System\Composition\Hosting\InstanceExportDescriptorProvider.cs" /> <Compile Include="System\Composition\Hosting\SinglePartExportDescriptorProvider.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\ActivationFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\DisposalFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\LifetimeFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\OnImportsSatisfiedFeature.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\PropertyImportSite.cs" /> <Compile Include="System\Composition\TypedParts\ActivationFeatures\PropertyInjectionFeature.cs" /> <Compile Include="System\Composition\TypedParts\ContractHelpers.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredExport.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredInstanceExport.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredPart.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\DiscoveredPropertyExport.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\ParameterImportSite.cs" /> <Compile Include="System\Composition\TypedParts\Discovery\TypeInspector.cs" /> <Compile Include="System\Composition\TypedParts\ImportInfo.cs" /> <Compile Include="System\Composition\TypedParts\TypedPartExportDescriptorProvider.cs" /> <Compile Include="System\Composition\TypedParts\Util\DirectAttributeContext.cs" /> <Compile Include="$(CoreLibSharedDir)System\Numerics\Hashing\HashHelpers.cs" Link="Common\System\Numerics\Hashing\HashHelpers.cs" /> </ItemGroup> <ItemGroup> <ProjectReference Include="$(LibrariesProjectRoot)System.Composition.AttributedModel\src\System.Composition.AttributedModel.csproj" /> <ProjectReference Include="$(LibrariesProjectRoot)System.Composition.Hosting\src\System.Composition.Hosting.csproj" /> <ProjectReference Include="$(LibrariesProjectRoot)System.Composition.Runtime\src\System.Composition.Runtime.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp'"> <Reference Include="System.Collections" /> <Reference Include="System.Linq" /> <Reference Include="System.Linq.Expressions" /> <Reference Include="System.Runtime" /> </ItemGroup> </Project>
1